repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-680k chars) |
---|---|---|---|---|
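Each row pairs a source file (`content`) with the API calls extracted from it (`apis`). The sketch below is a minimal, hypothetical example of reading one such record: it assumes the `apis` cell is a Python-literal list of call tuples (as the rows below suggest), and the row values used here are abbreviated stand-ins rather than real dataset entries.

```python
import ast

# Hypothetical, abbreviated row mirroring the schema above (not a real entry).
row = {
    "repo_name": "example-user/example-repo",
    "repo_path": "example/module.py",
    "repo_head_hexsha": "0" * 40,
    "content": "import json\ns = '{}'\ndata = json.loads(s)\n",
    "apis": "[((28, 41), 'json.loads', 'json.loads', (['s'], {}), '(s)\\n', (38, 41), False, 'import json\\n')]",
}

# Each tuple appears to hold character offsets into `content`, the qualified
# API name, the call's arguments, and the import statement it relies on.
for record in ast.literal_eval(row["apis"]):
    span, name, import_stmt = record[0], record[1], record[-1]
    print(span, name, import_stmt.strip())    # (28, 41) json.loads import json
    print(row["content"][span[0]:span[1]])     # json.loads(s)
```

Under that reading, the offset pairs let a consumer map each extracted call (and its argument list) back to exact character spans in `content`.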
pooja-bs-3003/Project_21 | prgm6.py | dc46e66ccf10937be6f2f8369ef02eb52e139eff | str1= input("enter a string :")
l1 =""
for i in str1[::-1]:
    l1 = l1 + i
print(l1)
if str1 == l1:
print("string is a palindrome")
else :
print("string is not a palindrome")
| [] |
Pandaaaa906/product_spider | product_spider/spiders/jk_spider.py | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | import json
import re
from string import ascii_uppercase
from time import time
from urllib.parse import urljoin
import scrapy
from more_itertools import first
from scrapy import Request
from product_spider.items import JkProduct, JKPackage
from product_spider.utils.functions import strip
class JkPrdSpider(scrapy.Spider):
name = "jk"
allowed_domains = ["jkchemical.com"]
base_url = "http://www.jkchemical.com"
start_urls = map(lambda x: "http://www.jkchemical.com/CH/products/index/ProductName/{0}.html".format(x),
ascii_uppercase)
prd_size_url = "http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}"
def parse(self, response):
for xp_url in response.xpath("//div[@class='yy toa']//a/@href"):
tmp_url = self.base_url + xp_url.extract()
yield Request(tmp_url.replace("EN", "CH"), callback=self.parse_list)
def parse_list(self, response):
xp_boxes = response.xpath("//table[@id]//div[@class='PRODUCT_box']")
for xp_box in xp_boxes:
div = xp_box.xpath(".//div[2][@class='left_right mulu_text']")
brand = strip(div.xpath('.//li[@id="ctl00_cph_Content_li_lt_Brand"]/text()').get(), '')
rel_url = div.xpath('.//a[@class="name"]/@href').get()
img_url = div.xpath('.//img/@src').get()
d = {
'brand': brand.replace('-', '') or None,
"purity": div.xpath(".//li[1]/text()").get('').split(u":")[-1].strip(),
"cas": strip(div.xpath(".//li[2]//a/text()").get()),
"cat_no": div.xpath(".//li[4]/text()").get().split(u":")[-1].strip(),
"en_name": strip(xp_box.xpath(".//a[@class='name']/text()").get()),
"cn_name": strip(xp_box.xpath(".//a[@class='name']//span[1]/text()").get()),
'prd_url': rel_url and urljoin(response.url, rel_url),
'img_url': img_url and urljoin(response.url, img_url),
}
data_jkid = xp_box.xpath(".//div[@data-jkid]/@data-jkid").get()
data_cid = xp_box.xpath(".//div[@data-cid]/@data-cid").get()
yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())),
body=u"",
meta={"prd_data": d},
callback=self.parse_package)
next_page = response.xpath('//a[contains(text(), "下一页")]/@href').get()
if next_page:
yield Request(urljoin(response.url, next_page), callback=self.parse_list)
def parse_package(self, response):
s = re.findall(r"(?<=\().+(?=\))", response.text)[0]
packages = json.loads(s)
d = response.meta.get('prd_data', {})
package = first(packages, {})
if package:
d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName')
yield JkProduct(**d)
for package_obj in packages:
catalog_price = package_obj.get("CatalogPrice", {})
dd = {
'brand': d.get('brand'),
'cat_no': d.get('cat_no'),
'package': package_obj.get("stringFormat"),
'price': catalog_price and catalog_price.get('Value'),
'currency': catalog_price and strip(catalog_price.get('Currency')),
'attrs': json.dumps(package_obj),
}
yield JKPackage(**dd)
| [((2746, 2759), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2756, 2759), False, 'import json\n'), ((2824, 2843), 'more_itertools.first', 'first', (['packages', '{}'], {}), '(packages, {})\n', (2829, 2843), False, 'from more_itertools import first\n'), ((2678, 2724), 're.findall', 're.findall', (['"""(?<=\\\\().+(?=\\\\))"""', 'response.text'], {}), "('(?<=\\\\().+(?=\\\\))', response.text)\n", (2688, 2724), False, 'import re\n'), ((2961, 2975), 'product_spider.items.JkProduct', 'JkProduct', ([], {}), '(**d)\n', (2970, 2975), False, 'from product_spider.items import JkProduct, JKPackage\n'), ((3420, 3443), 'json.dumps', 'json.dumps', (['package_obj'], {}), '(package_obj)\n', (3430, 3443), False, 'import json\n'), ((3477, 3492), 'product_spider.items.JKPackage', 'JKPackage', ([], {}), '(**dd)\n', (3486, 3492), False, 'from product_spider.items import JkProduct, JKPackage\n'), ((1933, 1963), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'rel_url'], {}), '(response.url, rel_url)\n', (1940, 1963), False, 'from urllib.parse import urljoin\n'), ((2004, 2034), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'img_url'], {}), '(response.url, img_url)\n', (2011, 2034), False, 'from urllib.parse import urljoin\n'), ((2566, 2598), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'next_page'], {}), '(response.url, next_page)\n', (2573, 2598), False, 'from urllib.parse import urljoin\n'), ((2289, 2295), 'time.time', 'time', ([], {}), '()\n', (2293, 2295), False, 'from time import time\n')] |
byq-luo/Lane_change_RL | env/LaneChangeEnv_v2.py | 3409238db939e6722441219b4c2dc66033611069 | import os
import sys
import random
import datetime
import gym
from gym import spaces
import numpy as np
from env.IDM import IDM
from env.Road import Road
from env.Vehicle import Vehicle
import math
# add sumo/tools into python environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
######################################################################
# simulation environments
class LaneChangeEnv(gym.Env):
def __init__(self, id=None, traffic=1, gui=False, seed=None):
# todo check traffic flow density
if traffic == 0:
# average 9 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/mapFree.sumo.cfg'
elif traffic == 2:
# average 19 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/mapDense.sumo.cfg'
else:
# average 14 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/map.sumo.cfg'
# arguments must be string, if float/int, must be converted to str(float/int), instead of '3.0'
self.sumoBinary = "/usr/local/Cellar/sumo/1.2.0/bin/sumo"
self.sumoCmd = ['-c', self.cfg,
# '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model'
'--lateral-resolution', str(0.8), # using 'Sublane-Model'
'--step-length', str(0.1),
'--default.action-step-length', str(0.1)]
# randomness
if seed is None:
self.sumoCmd += ['--random']
else:
self.sumoCmd += ['--seed', str(seed)]
# gui
if gui is True:
self.sumoBinary += '-gui'
self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True),
'--start', str(True)]
else:
self.sumoCmd = [self.sumoBinary] + self.sumoCmd
traci.start(self.sumoCmd)
self.rd = Road()
self.timestep = 0
self.dt = traci.simulation.getDeltaT()
self.randomseed = None
self.sumoseed = None
self.veh_dict = {}
self.vehID_tuple_all = ()
self.egoID = id
self.ego = None
# self.tgtLane = tgtlane
self.is_success = False
self.collision_num = 0
self.lateral_action = 2
# self.observation = [[0, 0, 0], # ego lane position and speed
# [0, 0, 0], # leader
# [0, 0, 0], # target lane leader
# [0, 0, 0]] # target lane follower
self.observation = np.empty(20)
self.reward = None # (float) : amount of reward returned after previous action
self.done = True # (bool): whether the episode has ended, in which case further step() calls will return undefined results
self.info = {
'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
self.action_space = spaces.Discrete(6)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))
def update_veh_dict(self, veh_id_tuple):
for veh_id in veh_id_tuple:
if veh_id not in self.veh_dict.keys():
self.veh_dict[veh_id] = Vehicle(veh_id, self.rd)
for veh_id in list(self.veh_dict.keys()):
if veh_id not in veh_id_tuple:
self.veh_dict.pop(veh_id)
for veh_id in list(self.veh_dict.keys()):
self.veh_dict[veh_id].update_info(self.rd, self.veh_dict)
def _updateObservationSingle(self, name, veh):
"""
:param name: 0:ego; 1:leader; 2:target leader; 3:target follower
:param id: vehicle id corresponding to name
:return:
"""
if veh is not None:
self.observation[name * 4 + 0] = veh.lanePos
self.observation[name * 4 + 1] = veh.speed
self.observation[name * 4 + 2] = veh.pos_lat
self.observation[name * 4 + 3] = veh.acce
else:
self.observation[name * 4 + 0] = self.observation[0] + 300.
self.observation[name * 4 + 1] = self.observation[1]
self.observation[name * 4 + 2] = 4.8
self.observation[name * 4 + 3] = 0
# todo check if rational
def updateObservation(self):
self.observation[0] = self.ego.lanePos
self.observation[1] = self.ego.speed
self.observation[2] = self.ego.pos_lat
self.observation[3] = self.ego.acce
self._updateObservationSingle(1, self.ego.orig_leader)
self._updateObservationSingle(2, self.ego.orig_follower)
self._updateObservationSingle(3, self.ego.trgt_leader)
self._updateObservationSingle(4, self.ego.trgt_follower)
# self.observation = np.array(self.observation).flatten()
# print(self.observation.shape)
def updateReward(self):
return -self.ego.dis2tgtLane
def updateReward2(self):
wc1 = 1
wc2 = 1
wt = 1
ws = 1
we = 1
# reward related to comfort
r_comf = wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2
# reward related to efficiency
r_time = - wt * self.timestep
r_speed = ws * (self.ego.speed - self.ego_speedLimit)
r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance
r_effi_all = r_time + r_speed + r_effi
# reward related to safety
w_lateral = 1
w_longi = 1
if self.ego.leaderID is not None:
# compute longitudinal time gap
delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed
delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce
if delta_A == 0:
TTC = - abs(self.ego.leaderDis)/delta_V
else:
TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis)
TTC = TTC/delta_A
if self.lateral_action != 1 and 0 < TTC < 2:
r_long_c = - math.exp(-2*TTC+5)
else:
r_long_c = 0
if self.lateral_action == 0: #abort lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_c = -math.exp(-4*alpha+5)
else:
r_lat_c = 0
if self.ego.targetLeaderID is not None:
# compute longitudinal time gap
delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed
delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce
delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos
if delta_A2 == 0:
TTC2 = - abs(delta_D2) / delta_V2
else:
TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)
TTC2 = TTC2 / delta_A2
if self.lateral_action == 1 and 0 < TTC2 < 2:
r_long_t = - math.exp(-2 * TTC2 + 5)
else:
r_long_t = 0
if self.lateral_action == 1: # lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_t = -math.exp(-4*alpha+5)
else:
r_lat_t = 0
r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t)
#
# if self.ego.leaderID is not None:
# # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
# assert 0 <= alpha <= 1.1
# r_safe_leader = w_lateral * alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis)
# else:
# r_safe_leader = 0
# if self.ego.targetLeaderID is not None:
# # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
# # print('alpha', alpha)
# assert 0 <= alpha <= 1.1
#
# r_safe_tgtleader = w_lateral * alpha + w_longi * (1 - alpha) * abs(
# self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos)
# else:
# r_safe_tgtleader = 0
#
#
# r_safe = r_safe_leader + r_safe_tgtleader
# total reward
r_total = r_comf + r_effi_all + r_safe
return r_total
def is_done(self):
# lane change successfully executed, episode ends, reset env
# todo modify
if self.is_success:
self.done = True
# print('reset on: successfully lane change, dis2targetlane:',
# self.ego.dis2tgtLane)
# too close to ramp entrance
if self.ego.dis2entrance < 10.0:
self.done = True
# print('reset on: too close to ramp entrance, dis2targetlane:',
# self.ego.dis2tgtLane)
# ego vehicle out of env
if self.egoID not in self.vehID_tuple_all:
self.done = True
# print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all)
# collision occurs
self.collision_num = traci.simulation.getCollidingVehiclesNumber()
if self.collision_num > 0:
self.done = True
# print('reset on: self.collision_num:', self.collision_num)
def preStep(self):
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
def step(self, action=2):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, call `reset()` outside env!! to reset this
environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): longitudinal0: action[0] = 1: accelerate
action[0] = -1: decelerate
action[0] = 0: use SUMO default
action[0] = others: acce = 0.0
longitudinal1: action[0] = 0: follow original lane leader
action[0] = 1: follow closer leader
longitudinal2: action[0] = 0: follow original lane leader
action[0] = 1: follow target lane leader
**important**: orginal/target lane leader will not change despite the lateral position of
the ego may change
lateral: action[1] = 1: lane change
action[1] = 0: abort lane change, change back to original lane
action[1] = 2: keep in current lateral position
Returns:
described in __init__
"""
action_longi = action // 3
action_lateral = action % 3
self.lateral_action = action_lateral
# action_longi = action[0]
# action_lateral = action[1]
assert self.done is False, 'self.done is not False'
assert action is not None, 'action is None'
assert self.egoID in self.vehID_tuple_all, 'vehicle not in env'
self.timestep += 1
# lateral control-------------------------
# episode in progress; 0:change back to original line; 1:lane change to target lane; 2:keep current
# lane change to target lane
if not self.is_success:
if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd)
# print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth)
# print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth))
# abort lane change, change back to ego's original lane
if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd)
# print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat)
# keep current lateral position
if action_lateral == 2:
self.is_success = self.ego.changeLane(True, -1, self.rd)
# longitudinal control2---------------------
acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi)
# print(acceNext)
vNext = self.ego.speed + acceNext * 0.1
traci.vehicle.setSpeed(self.egoID, vNext)
# update info------------------------------
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
# check if episode ends
self.is_done()
if self.done is True:
self.info['resetFlag'] = True
return self.observation, 0.0, self.done, self.info
else:
self.updateObservation()
self.reward = self.updateReward()
return self.observation, self.reward, self.done, self.info
def seed(self, seed=None):
if seed is None:
self.randomseed = datetime.datetime.now().microsecond
else:
self.randomseed = seed
random.seed(self.randomseed)
def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None):
"""
reset env
:param id: ego vehicle id
:param tfc: int. 0:light; 1:medium; 2:dense
:return: initial observation
"""
self.seed(randomseed)
if sumoseed is None:
self.sumoseed = self.randomseed
traci.close()
self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed)
# continue step until ego appears in env
if self.egoID is not None:
while self.egoID not in self.veh_dict.keys():
# must ensure safety in preStpe
self.preStep()
if self.timestep > 5000:
raise Exception('cannot find ego after 5000 timesteps')
assert self.egoID in self.vehID_tuple_all, "cannot start training while ego is not in env"
self.done = False
self.ego = self.veh_dict[self.egoID]
self.ego.trgt_laneIndex = tlane
self.ego.is_ego = 1
# set ego vehicle speed mode
traci.vehicle.setSpeedMode(self.ego.veh_id, 0)
self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid)
self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID))
self.ego.idm_obj = IDM()
self.ego.idm_obj.__init__(self.ego_speedLimit)
self.ego.update_info(self.rd, self.veh_dict)
self.updateObservation()
return self.observation
return
def close(self):
traci.close()
| [((282, 328), 'os.path.join', 'os.path.join', (["os.environ['SUMO_HOME']", '"""tools"""'], {}), "(os.environ['SUMO_HOME'], 'tools')\n", (294, 328), False, 'import os\n'), ((333, 355), 'sys.path.append', 'sys.path.append', (['tools'], {}), '(tools)\n', (348, 355), False, 'import sys\n'), ((387, 446), 'sys.exit', 'sys.exit', (['"""please declare environment variable \'SUMO_HOME\'"""'], {}), '("please declare environment variable \'SUMO_HOME\'")\n', (395, 446), False, 'import sys\n'), ((2097, 2122), 'traci.start', 'traci.start', (['self.sumoCmd'], {}), '(self.sumoCmd)\n', (2108, 2122), False, 'import traci\n'), ((2142, 2148), 'env.Road.Road', 'Road', ([], {}), '()\n', (2146, 2148), False, 'from env.Road import Road\n'), ((2193, 2221), 'traci.simulation.getDeltaT', 'traci.simulation.getDeltaT', ([], {}), '()\n', (2219, 2221), False, 'import traci\n'), ((2800, 2812), 'numpy.empty', 'np.empty', (['(20)'], {}), '(20)\n', (2808, 2812), True, 'import numpy as np\n'), ((3213, 3231), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (3228, 3231), False, 'from gym import spaces\n'), ((3265, 3314), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(20,)'}), '(low=-np.inf, high=np.inf, shape=(20,))\n', (3275, 3314), False, 'from gym import spaces\n'), ((9695, 9740), 'traci.simulation.getCollidingVehiclesNumber', 'traci.simulation.getCollidingVehiclesNumber', ([], {}), '()\n', (9738, 9740), False, 'import traci\n'), ((9910, 9932), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (9930, 9932), False, 'import traci\n'), ((9964, 10020), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (9996, 10020), False, 'import traci\n'), ((13256, 13297), 'traci.vehicle.setSpeed', 'traci.vehicle.setSpeed', (['self.egoID', 'vNext'], {}), '(self.egoID, vNext)\n', (13278, 13297), False, 'import traci\n'), ((13359, 13381), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (13379, 13381), False, 'import traci\n'), ((13413, 13469), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (13445, 13469), False, 'import traci\n'), ((14059, 14087), 'random.seed', 'random.seed', (['self.randomseed'], {}), '(self.randomseed)\n', (14070, 14087), False, 'import random\n'), ((14455, 14468), 'traci.close', 'traci.close', ([], {}), '()\n', (14466, 14468), False, 'import traci\n'), ((15703, 15716), 'traci.close', 'traci.close', ([], {}), '()\n', (15714, 15716), False, 'import traci\n'), ((15196, 15242), 'traci.vehicle.setSpeedMode', 'traci.vehicle.setSpeedMode', (['self.ego.veh_id', '(0)'], {}), '(self.ego.veh_id, 0)\n', (15222, 15242), False, 'import traci\n'), ((15278, 15313), 'traci.vehicle.getSpeedFactor', 'traci.vehicle.getSpeedFactor', (['egoid'], {}), '(egoid)\n', (15306, 15313), False, 'import traci\n'), ((15463, 15468), 'env.IDM.IDM', 'IDM', ([], {}), '()\n', (15466, 15468), False, 'from env.IDM import IDM\n'), ((3488, 3512), 'env.Vehicle.Vehicle', 'Vehicle', (['veh_id', 'self.rd'], {}), '(veh_id, self.rd)\n', (3495, 3512), False, 'from env.Vehicle import Vehicle\n'), ((13966, 13989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13987, 13989), False, 'import datetime\n'), ((6097, 6155), 'math.sqrt', 'math.sqrt', (['(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)'], {}), '(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)\n', (6106, 6155), False, 'import 
math\n'), ((6274, 6296), 'math.exp', 'math.exp', (['(-2 * TTC + 5)'], {}), '(-2 * TTC + 5)\n', (6282, 6296), False, 'import math\n'), ((6564, 6588), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (6572, 6588), False, 'import math\n'), ((7117, 7167), 'math.sqrt', 'math.sqrt', (['(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)'], {}), '(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)\n', (7126, 7167), False, 'import math\n'), ((7295, 7318), 'math.exp', 'math.exp', (['(-2 * TTC2 + 5)'], {}), '(-2 * TTC2 + 5)\n', (7303, 7318), False, 'import math\n'), ((7591, 7615), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (7599, 7615), False, 'import math\n'), ((15394, 15429), 'traci.vehicle.getLaneID', 'traci.vehicle.getLaneID', (['self.egoID'], {}), '(self.egoID)\n', (15417, 15429), False, 'import traci\n')] |
jblukach/distillery | cidr/o365/o365.py | 4087debb496d7dfc4c425c2e68246e1b0726168b | import boto3
import ipaddress
import json
import logging
import os
import requests
import uuid
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
client = boto3.client('ssm')
def downloader(instance, latest, parameter, link):
r = requests.get(link)
cidrs = r.json()
if r.status_code == 200:
for cidr in cidrs:
try:
if len(cidr['ips']) != 0:
for ip in cidr['ips']:
sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip
hostmask = ip.split('/')
iptype = ipaddress.ip_address(hostmask[0])
nametype = 'IPv'+str(iptype.version)+'#'
if nametype == 'IPv4#':
netrange = ipaddress.IPv4Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv4Address(first))
lastip = int(ipaddress.IPv4Address(last))
elif nametype == 'IPv6#':
netrange = ipaddress.IPv6Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv6Address(first))
lastip = int(ipaddress.IPv6Address(last))
table.put_item(
Item= {
'pk': nametype,
'sk': sortkey,
'service': cidr['serviceArea'],
'cidr': ip,
'created': latest,
'endpoint': instance,
'firstip': firstip,
'lastip': lastip
}
)
except:
pass
logger.info('o365 '+instance+' IP Ranges Updated')
response = client.put_parameter(
Name = parameter,
Value = str(latest),
Type = 'String',
Overwrite = True
)
def handler(event, context):
r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4()))
logger.info('Link Status Code: '+str(r.status_code))
if r.status_code == 200:
versions = r.json()
logger.info(versions)
for version in versions:
if version['instance'] == 'Worldwide':
response = client.get_parameter(Name=os.environ['WORLD_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 Worldwide IP Ranges')
link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link)
elif version['instance'] == 'USGovDoD':
response = client.get_parameter(Name=os.environ['DOD_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 USGovDoD IP Ranges')
link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link)
elif version['instance'] == 'USGovGCCHigh':
response = client.get_parameter(Name=os.environ['HIGH_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 USGovGCCHigh IP Ranges')
link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link)
elif version['instance'] == 'China':
response = client.get_parameter(Name=os.environ['CHINA_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 China IP Ranges')
link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link)
elif version['instance'] == 'Germany':
response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 Germany IP Ranges')
link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link)
else:
logger.info('No o365 IP Range Updates')
return {
'statusCode': 200,
'body': json.dumps('Download o365 IP Ranges')
}
| [((105, 124), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (122, 124), False, 'import logging\n'), ((167, 193), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (181, 193), False, 'import boto3\n'), ((261, 280), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (273, 280), False, 'import boto3\n'), ((346, 364), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (358, 364), False, 'import requests\n'), ((5422, 5459), 'json.dumps', 'json.dumps', (['"""Download o365 IP Ranges"""'], {}), "('Download o365 IP Ranges')\n", (5432, 5459), False, 'import json\n'), ((2416, 2428), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2426, 2428), False, 'import uuid\n'), ((713, 746), 'ipaddress.ip_address', 'ipaddress.ip_address', (['hostmask[0]'], {}), '(hostmask[0])\n', (733, 746), False, 'import ipaddress\n'), ((899, 924), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['ip'], {}), '(ip)\n', (920, 924), False, 'import ipaddress\n'), ((3031, 3043), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3041, 3043), False, 'import uuid\n'), ((1035, 1063), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['first'], {}), '(first)\n', (1056, 1063), False, 'import ipaddress\n'), ((1106, 1133), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['last'], {}), '(last)\n', (1127, 1133), False, 'import ipaddress\n'), ((1224, 1249), 'ipaddress.IPv6Network', 'ipaddress.IPv6Network', (['ip'], {}), '(ip)\n', (1245, 1249), False, 'import ipaddress\n'), ((3568, 3580), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3578, 3580), False, 'import uuid\n'), ((1360, 1388), 'ipaddress.IPv6Address', 'ipaddress.IPv6Address', (['first'], {}), '(first)\n', (1381, 1388), False, 'import ipaddress\n'), ((1431, 1458), 'ipaddress.IPv6Address', 'ipaddress.IPv6Address', (['last'], {}), '(last)\n', (1452, 1458), False, 'import ipaddress\n'), ((4116, 4128), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4126, 4128), False, 'import uuid\n'), ((4645, 4657), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4655, 4657), False, 'import uuid\n'), ((5183, 5195), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5193, 5195), False, 'import uuid\n')] |
zulip/finbot | exampleinc.py | dcb6bfe54a674f4ff98370677a648b6cc1706e16 | #!/usr/bin/python
from money import *
c = Company("Example Inc")
c.add_flow(FixedCost("Initial Cash", -500000))
c.add_flow(FixedCost("Incorporation", 500))
c.add_flow(ConstantCost("Office", 50000))
c.add_flow(PeriodicCost("Subscription", 4000, "2012-01-05", 14))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
c.add_flow(SemiMonthlyWages("Payroll", 6000, "2012-01-01"))
print(c)
c.cash_monthly_summary("2012-01-01", "2013-07-01")
| [] |
dawid1stanek/guardian | guardian/validators.py | 89359c93d5f36c8b458428e147000352fa7ad01d | #!/usr/bin/env python
import os
import socket
import subprocess
import argparse
import logging
LOGGER = logging.getLogger(__name__)
class ValidatorError(Exception):
pass
def ping(address):
try:
subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
LOGGER.info('Ping server %s - OK', address)
except subprocess.CalledProcessError as e:
LOGGER.error('Ping server %s - Failed', address)
raise ValidatorError(e)
ping.short_name = 'PING'
def port(address, port):
s = socket.socket()
try:
s.connect((address, port))
LOGGER.info('Checking port %s:%d - OK', address, port)
except socket.error as e:
LOGGER.error('Checking port %s:%d - Failed', address, port)
raise ValidatorError(e)
port.short_name = 'PORT'
| [((106, 133), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (123, 133), False, 'import logging\n'), ((569, 584), 'socket.socket', 'socket.socket', ([], {}), '()\n', (582, 584), False, 'import socket\n'), ((216, 325), 'subprocess.check_call', 'subprocess.check_call', (["('ping', '-c 1', '-W 1', address)"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(('ping', '-c 1', '-W 1', address), stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (237, 325), False, 'import subprocess\n')] |
Open-EO/openeo-geopyspark-driver | tests/data/udf_noop.py | afd5902f426d2aa456d70ed6f2d51b6907de1cab | from openeo.udf import XarrayDataCube
def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube:
return cube
| [] |
LHGames-2017/superintelligence | StateGoHome.py | bd9ea3d444e571a0f9607bf0f6799807f7e644ca | from PlayerState import *
from pathFinder import PathFinder
from StateLook4Resources import *
class StateGoHome(PlayerState):
""" State Implementation: has a resource and go back home """
def __init__(self, player):
self.player = player
self.player.setTarget(self.player.playerData.HouseLocation)
def doAction(self):
origin = self.player.playerData.Position
target = self.player.target
moves = PathFinder(self.player.mapView).getPath(origin, target)
# If player just gave the resource home, look 4 resources again
if(not self.player.hasResources()):
self.player.state = StateLook4Resources(self.player)
return create_purchase_action(0)
return create_move_action(moves[0])
def toString():
return "StateGoHome"
| [((450, 481), 'pathFinder.PathFinder', 'PathFinder', (['self.player.mapView'], {}), '(self.player.mapView)\n', (460, 481), False, 'from pathFinder import PathFinder\n')] |
schwendp/hoomd-blue | hoomd/mpcd/test-py/stream_slit_test.py | df7970121b19bc4f8674348ab3241055ac87153b | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: mphoward
import unittest
import numpy as np
import hoomd
from hoomd import md
from hoomd import mpcd
# unit tests for mpcd slit streaming geometry
class mpcd_stream_slit_test(unittest.TestCase):
def setUp(self):
# establish the simulation context
hoomd.context.initialize()
# set the decomposition in z for mpi builds
if hoomd.comm.get_num_ranks() > 1:
hoomd.comm.decomposition(nz=2)
# default testing configuration
hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.)))
# initialize the system from the starting snapshot
snap = mpcd.data.make_snapshot(N=2)
snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]]
snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]]
self.s = mpcd.init.read_snapshot(snap)
mpcd.integrator(dt=0.1)
# test creation can happen (with all parameters set)
def test_create(self):
mpcd.stream.slit(H=4., V=0.1, boundary="no_slip", period=2)
# test for setting parameters
def test_set_params(self):
slit = mpcd.stream.slit(H=4.)
self.assertAlmostEqual(slit.H, 4.)
self.assertAlmostEqual(slit.V, 0.)
self.assertEqual(slit.boundary, "no_slip")
self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.)
self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)
# change H and also ensure other parameters stay the same
slit.set_params(H=2.)
self.assertAlmostEqual(slit.H, 2.)
self.assertAlmostEqual(slit.V, 0.)
self.assertEqual(slit.boundary, "no_slip")
self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.)
self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)
# change V
slit.set_params(V=0.1)
self.assertAlmostEqual(slit.V, 0.1)
self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1)
# change BCs
slit.set_params(boundary="slip")
self.assertEqual(slit.boundary, "slip")
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip)
# test for invalid boundary conditions being set
def test_bad_boundary(self):
slit = mpcd.stream.slit(H=4.)
slit.set_params(boundary="no_slip")
slit.set_params(boundary="slip")
with self.assertRaises(ValueError):
slit.set_params(boundary="invalid")
# test basic stepping behavior with no slip boundary conditions
def test_step_noslip(self):
mpcd.stream.slit(H=4.)
# take one step
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])
# take another step where one particle will now hit the wall
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])
# take another step, wrapping the second particle through the boundary
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.])
def test_step_moving_wall(self):
mpcd.stream.slit(H=4., boundary="no_slip", V=1.0, period=3)
# change velocity of lower particle so it is translating relative to wall
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
snap.particles.velocity[1] = [-2.,-1.,-1.]
self.s.restore_snapshot(snap)
# run one step and check bounce back of particles
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
# the first particle is matched exactly to the wall speed, and so it will translate at
# same velocity along +x for 3 steps. It will bounce back in y and z to where it started.
# (vx stays the same, and vy and vz flip.)
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.])
# the second particle has y and z velocities flip again, and since it started closer,
# it moves relative to original position.
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.])
# test basic stepping behavior with slip boundary conditions
def test_step_slip(self):
mpcd.stream.slit(H=4., boundary="slip")
# take one step
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])
# take another step where one particle will now hit the wall
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])
# take another step, wrapping the second particle through the boundary
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.])
# test that setting the slit size too large raises an error
def test_validate_box(self):
# initial configuration is invalid
slit = mpcd.stream.slit(H=10.)
with self.assertRaises(RuntimeError):
hoomd.run(1)
# now it should be valid
slit.set_params(H=4.)
hoomd.run(2)
# make sure we can invalidate it again
slit.set_params(H=10.)
with self.assertRaises(RuntimeError):
hoomd.run(1)
# test that particles out of bounds can be caught
def test_out_of_bounds(self):
slit = mpcd.stream.slit(H=3.8)
with self.assertRaises(RuntimeError):
hoomd.run(1)
slit.set_params(H=3.85)
hoomd.run(1)
# test that virtual particle filler can be attached, removed, and updated
def test_filler(self):
# initialization of a filler
slit = mpcd.stream.slit(H=4.)
slit.set_filler(density=5., kT=1.0, seed=42, type='A')
self.assertTrue(slit._filler is not None)
# run should be able to setup the filler, although this all happens silently
hoomd.run(1)
# changing the geometry should still be OK with a run
slit.set_params(V=1.0)
hoomd.run(1)
# changing filler should be allowed
slit.set_filler(density=10., kT=1.5, seed=7)
self.assertTrue(slit._filler is not None)
hoomd.run(1)
# assert an error is raised if we set a bad particle type
with self.assertRaises(RuntimeError):
slit.set_filler(density=5., kT=1.0, seed=42, type='B')
# assert an error is raised if we set a bad density
with self.assertRaises(RuntimeError):
slit.set_filler(density=-1.0, kT=1.0, seed=42)
# removing the filler should still allow a run
slit.remove_filler()
self.assertTrue(slit._filler is None)
hoomd.run(1)
def tearDown(self):
del self.s
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| [((9563, 9600), 'unittest.main', 'unittest.main', ([], {'argv': "['test.py', '-v']"}), "(argv=['test.py', '-v'])\n", (9576, 9600), False, 'import unittest\n'), ((440, 466), 'hoomd.context.initialize', 'hoomd.context.initialize', ([], {}), '()\n', (464, 466), False, 'import hoomd\n'), ((816, 844), 'hoomd.mpcd.data.make_snapshot', 'mpcd.data.make_snapshot', ([], {'N': '(2)'}), '(N=2)\n', (839, 844), False, 'from hoomd import mpcd\n'), ((997, 1026), 'hoomd.mpcd.init.read_snapshot', 'mpcd.init.read_snapshot', (['snap'], {}), '(snap)\n', (1020, 1026), False, 'from hoomd import mpcd\n'), ((1036, 1059), 'hoomd.mpcd.integrator', 'mpcd.integrator', ([], {'dt': '(0.1)'}), '(dt=0.1)\n', (1051, 1059), False, 'from hoomd import mpcd\n'), ((1153, 1213), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)', 'V': '(0.1)', 'boundary': '"""no_slip"""', 'period': '(2)'}), "(H=4.0, V=0.1, boundary='no_slip', period=2)\n", (1169, 1213), False, 'from hoomd import mpcd\n'), ((1294, 1317), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)'}), '(H=4.0)\n', (1310, 1317), False, 'from hoomd import mpcd\n'), ((2616, 2639), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)'}), '(H=4.0)\n', (2632, 2639), False, 'from hoomd import mpcd\n'), ((2926, 2949), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)'}), '(H=4.0)\n', (2942, 2949), False, 'from hoomd import mpcd\n'), ((2982, 2994), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (2991, 2994), False, 'import hoomd\n'), ((3523, 3535), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (3532, 3535), False, 'import hoomd\n'), ((4075, 4087), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (4084, 4087), False, 'import hoomd\n'), ((4582, 4642), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)', 'boundary': '"""no_slip"""', 'V': '(1.0)', 'period': '(3)'}), "(H=4.0, boundary='no_slip', V=1.0, period=3)\n", (4598, 4642), False, 'from hoomd import mpcd\n'), ((4962, 4974), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (4971, 4974), False, 'import hoomd\n'), ((5936, 5976), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)', 'boundary': '"""slip"""'}), "(H=4.0, boundary='slip')\n", (5952, 5976), False, 'from hoomd import mpcd\n'), ((6009, 6021), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (6018, 6021), False, 'import hoomd\n'), ((6550, 6562), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (6559, 6562), False, 'import hoomd\n'), ((7102, 7114), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (7111, 7114), False, 'import hoomd\n'), ((7721, 7745), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(10.0)'}), '(H=10.0)\n', (7737, 7745), False, 'from hoomd import mpcd\n'), ((7888, 7900), 'hoomd.run', 'hoomd.run', (['(2)'], {}), '(2)\n', (7897, 7900), False, 'import hoomd\n'), ((8155, 8178), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(3.8)'}), '(H=3.8)\n', (8171, 8178), False, 'from hoomd import mpcd\n'), ((8291, 8303), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8300, 8303), False, 'import hoomd\n'), ((8462, 8485), 'hoomd.mpcd.stream.slit', 'mpcd.stream.slit', ([], {'H': '(4.0)'}), '(H=4.0)\n', (8478, 8485), False, 'from hoomd import mpcd\n'), ((8692, 8704), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8701, 8704), False, 'import hoomd\n'), ((8807, 8819), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8816, 8819), False, 'import hoomd\n'), ((8976, 8988), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8985, 8988), False, 'import hoomd\n'), 
((9474, 9486), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (9483, 9486), False, 'import hoomd\n'), ((531, 557), 'hoomd.comm.get_num_ranks', 'hoomd.comm.get_num_ranks', ([], {}), '()\n', (555, 557), False, 'import hoomd\n'), ((575, 605), 'hoomd.comm.decomposition', 'hoomd.comm.decomposition', ([], {'nz': '(2)'}), '(nz=2)\n', (599, 605), False, 'import hoomd\n'), ((3044, 3065), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (3063, 3065), False, 'import hoomd\n'), ((3084, 3174), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.95, 4.95, 3.95]'], {}), '(snap.particles.position[0], [-4.95, \n 4.95, 3.95])\n', (3120, 3174), True, 'import numpy as np\n'), ((3180, 3266), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[1.0, -1.0, 1.0]'], {}), '(snap.particles.velocity[0], [1.0, -1.0,\n 1.0])\n', (3216, 3266), True, 'import numpy as np\n'), ((3270, 3359), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.1, -0.1, -3.9]'], {}), '(snap.particles.position[1], [-0.1, -\n 0.1, -3.9])\n', (3306, 3359), True, 'import numpy as np\n'), ((3365, 3454), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[-1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[1], [-1.0, -\n 1.0, -1.0])\n', (3401, 3454), True, 'import numpy as np\n'), ((3585, 3606), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (3604, 3606), False, 'import hoomd\n'), ((3625, 3715), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.95, 4.95, 3.95]'], {}), '(snap.particles.position[0], [-4.95, \n 4.95, 3.95])\n', (3661, 3715), True, 'import numpy as np\n'), ((3721, 3808), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[-1.0, 1.0, -1.0]'], {}), '(snap.particles.velocity[0], [-1.0, 1.0,\n -1.0])\n', (3757, 3808), True, 'import numpy as np\n'), ((3812, 3901), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.2, -0.2, -4.0]'], {}), '(snap.particles.position[1], [-0.2, -\n 0.2, -4.0])\n', (3848, 3901), True, 'import numpy as np\n'), ((3907, 3996), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[-1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[1], [-1.0, -\n 1.0, -1.0])\n', (3943, 3996), True, 'import numpy as np\n'), ((4137, 4158), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (4156, 4158), False, 'import hoomd\n'), ((4177, 4267), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[4.95, -4.95, 3.85]'], {}), '(snap.particles.position[0], [4.95, -\n 4.95, 3.85])\n', (4213, 4267), True, 'import numpy as np\n'), ((4273, 4360), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[-1.0, 1.0, -1.0]'], {}), '(snap.particles.velocity[0], [-1.0, 1.0,\n -1.0])\n', (4309, 4360), True, 'import numpy as np\n'), ((4364, 4453), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.1, -0.1, -3.9]'], {}), '(snap.particles.position[1], [-0.1, -\n 0.1, -3.9])\n', (4400, 4453), 
True, 'import numpy as np\n'), ((4459, 4544), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[1.0, 1.0, 1.0]'], {}), '(snap.particles.velocity[1], [1.0, 1.0,\n 1.0])\n', (4495, 4544), True, 'import numpy as np\n'), ((4774, 4795), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (4793, 4795), False, 'import hoomd\n'), ((5024, 5045), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (5043, 5045), False, 'import hoomd\n'), ((5320, 5411), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.75, -4.95, 3.85]'], {}), '(snap.particles.position[0], [-4.75, -\n 4.95, 3.85])\n', (5356, 5411), True, 'import numpy as np\n'), ((5417, 5503), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[1.0, 1.0, -1.0]'], {}), '(snap.particles.velocity[0], [1.0, 1.0,\n -1.0])\n', (5453, 5503), True, 'import numpy as np\n'), ((5660, 5749), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.4, -0.1, -3.9]'], {}), '(snap.particles.position[1], [-0.4, -\n 0.1, -3.9])\n', (5696, 5749), True, 'import numpy as np\n'), ((5755, 5840), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[0.0, 1.0, 1.0]'], {}), '(snap.particles.velocity[1], [0.0, 1.0,\n 1.0])\n', (5791, 5840), True, 'import numpy as np\n'), ((6071, 6092), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (6090, 6092), False, 'import hoomd\n'), ((6111, 6201), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.95, 4.95, 3.95]'], {}), '(snap.particles.position[0], [-4.95, \n 4.95, 3.95])\n', (6147, 6201), True, 'import numpy as np\n'), ((6207, 6293), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[1.0, -1.0, 1.0]'], {}), '(snap.particles.velocity[0], [1.0, -1.0,\n 1.0])\n', (6243, 6293), True, 'import numpy as np\n'), ((6297, 6386), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.1, -0.1, -3.9]'], {}), '(snap.particles.position[1], [-0.1, -\n 0.1, -3.9])\n', (6333, 6386), True, 'import numpy as np\n'), ((6392, 6481), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[-1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[1], [-1.0, -\n 1.0, -1.0])\n', (6428, 6481), True, 'import numpy as np\n'), ((6612, 6633), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (6631, 6633), False, 'import hoomd\n'), ((6652, 6742), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.85, 4.85, 3.95]'], {}), '(snap.particles.position[0], [-4.85, \n 4.85, 3.95])\n', (6688, 6742), True, 'import numpy as np\n'), ((6748, 6835), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[0], [1.0, -1.0,\n -1.0])\n', (6784, 6835), True, 'import numpy as np\n'), ((6839, 6928), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.2, -0.2, -4.0]'], {}), '(snap.particles.position[1], [-0.2, -\n 0.2, 
-4.0])\n', (6875, 6928), True, 'import numpy as np\n'), ((6934, 7023), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[-1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[1], [-1.0, -\n 1.0, -1.0])\n', (6970, 7023), True, 'import numpy as np\n'), ((7164, 7185), 'hoomd.comm.get_rank', 'hoomd.comm.get_rank', ([], {}), '()\n', (7183, 7185), False, 'import hoomd\n'), ((7204, 7294), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[0]', '[-4.75, 4.75, 3.85]'], {}), '(snap.particles.position[0], [-4.75, \n 4.75, 3.85])\n', (7240, 7294), True, 'import numpy as np\n'), ((7300, 7387), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[0]', '[1.0, -1.0, -1.0]'], {}), '(snap.particles.velocity[0], [1.0, -1.0,\n -1.0])\n', (7336, 7387), True, 'import numpy as np\n'), ((7391, 7480), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.position[1]', '[-0.3, -0.3, -3.9]'], {}), '(snap.particles.position[1], [-0.3, -\n 0.3, -3.9])\n', (7427, 7480), True, 'import numpy as np\n'), ((7486, 7574), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['snap.particles.velocity[1]', '[-1.0, -1.0, 1.0]'], {}), '(snap.particles.velocity[1], [-1.0, -\n 1.0, 1.0])\n', (7522, 7574), True, 'import numpy as np\n'), ((7803, 7815), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (7812, 7815), False, 'import hoomd\n'), ((8038, 8050), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8047, 8050), False, 'import hoomd\n'), ((8237, 8249), 'hoomd.run', 'hoomd.run', (['(1)'], {}), '(1)\n', (8246, 8249), False, 'import hoomd\n'), ((714, 739), 'hoomd.data.boxdim', 'hoomd.data.boxdim', ([], {'L': '(10.0)'}), '(L=10.0)\n', (731, 739), False, 'import hoomd\n')] |
haoyuchen1992/CourseBuilder | tests/functional/model_models.py | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'[email protected] (John Cox)',
]
import datetime
from models import models
from tests.functional import actions
# Disable complaints about docstrings for self-documenting tests.
# pylint: disable-msg=g-missing-docstring
class EventEntityTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
event = models.EventEntity(source='source', user_id='1')
key = event.put()
exported = event.for_export(self.transform)
self.assert_blacklisted_properties_removed(event, exported)
self.assertEqual('source', event.source)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(key, models.EventEntity.safe_key(key, self.transform))
class PersonalProfileTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly_and_sets_safe_key(self):
date_of_birth = datetime.date.today()
email = '[email protected]'
legal_name = 'legal_name'
nick_name = 'nick_name'
user_id = '1'
profile = models.PersonalProfile(
date_of_birth=date_of_birth, email=email, key_name=user_id,
legal_name=legal_name, nick_name=nick_name)
profile.put()
exported = profile.for_export(self.transform)
self.assert_blacklisted_properties_removed(profile, exported)
self.assertEqual(
self.transform(user_id), exported.safe_key.name())
class QuestionDAOTestCase(actions.TestBase):
"""Functional tests for QuestionDAO."""
# Name determined by parent. pylint: disable-msg=g-bad-name
def setUp(self):
"""Sets up datastore contents."""
super(QuestionDAOTestCase, self).setUp()
self.used_twice_question_id = 1
self.used_twice_question_dto = models.QuestionDTO(
self.used_twice_question_id, {})
self.used_once_question_id = 2
self.used_once_question_dto = models.QuestionDTO(
self.used_once_question_id, {})
self.unused_question_id = 3
self.unused_question_dto = models.QuestionDTO(
self.unused_question_id, {})
models.QuestionDAO.save_all([
self.used_twice_question_dto, self.used_once_question_dto,
self.unused_question_dto])
# Handcoding the dicts. This is dangerous because they're handcoded
# elsewhere, the implementations could fall out of sync, and these tests
# may then pass erroneously.
self.first_question_group_description = 'first_question_group'
self.first_question_group_id = 4
self.first_question_group_dto = models.QuestionGroupDTO(
self.first_question_group_id,
{'description': self.first_question_group_description,
'items': [{'question': str(self.used_once_question_id)}]})
self.second_question_group_description = 'second_question_group'
self.second_question_group_id = 5
self.second_question_group_dto = models.QuestionGroupDTO(
self.second_question_group_id,
{'description': self.second_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
self.third_question_group_description = 'third_question_group'
self.third_question_group_id = 6
self.third_question_group_dto = models.QuestionGroupDTO(
self.third_question_group_id,
{'description': self.third_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
models.QuestionGroupDAO.save_all([
self.first_question_group_dto, self.second_question_group_dto,
self.third_question_group_dto])
def test_used_by_returns_description_of_single_question_group(self):
self.assertEqual(
[self.first_question_group_description],
models.QuestionDAO.used_by(self.used_once_question_id))
def test_used_by_returns_descriptions_of_multiple_question_groups(self):
self.assertEqual(
[self.second_question_group_description,
self.third_question_group_description],
models.QuestionDAO.used_by(self.used_twice_question_id))
def test_used_by_returns_empty_list_for_unused_question(self):
not_found_id = 7
self.assertFalse(models.QuestionDAO.load(not_found_id))
self.assertEqual([], models.QuestionDAO.used_by(not_found_id))
class StudentTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
user_id = '1'
student = models.Student(key_name='name', user_id='1', is_enrolled=True)
key = student.put()
exported = student.for_export(self.transform)
self.assert_blacklisted_properties_removed(student, exported)
self.assertTrue(exported.is_enrolled)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(
'transformed_' + user_id, exported.key_by_user_id.name())
self.assertEqual(
models.Student.safe_key(key, self.transform), exported.safe_key)
def test_get_key_does_not_transform_by_default(self):
user_id = 'user_id'
student = models.Student(key_name='name', user_id=user_id)
student.put()
self.assertEqual(user_id, student.get_key().name())
def test_safe_key_transforms_name(self):
key = models.Student(key_name='name').put()
self.assertEqual(
'transformed_name',
models.Student.safe_key(key, self.transform).name())
class StudentAnswersEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_name(self):
student_key = models.Student(key_name='name').put()
answers = models.StudentAnswersEntity(key_name=student_key.name())
answers_key = answers.put()
self.assertEqual(
'transformed_name',
models.StudentAnswersEntity.safe_key(
answers_key, self.transform).name())
class StudentPropertyEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_user_id_component(self):
user_id = 'user_id'
student = models.Student(key_name='[email protected]', user_id=user_id)
student.put()
property_name = 'property-name'
student_property_key = models.StudentPropertyEntity.create(
student, property_name).put()
self.assertEqual(
'transformed_%s-%s' % (user_id, property_name),
models.StudentPropertyEntity.safe_key(
student_property_key, self.transform).name())
| [((1007, 1055), 'models.models.EventEntity', 'models.EventEntity', ([], {'source': '"""source"""', 'user_id': '"""1"""'}), "(source='source', user_id='1')\n", (1025, 1055), False, 'from models import models\n'), ((1544, 1565), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1563, 1565), False, 'import datetime\n'), ((1707, 1838), 'models.models.PersonalProfile', 'models.PersonalProfile', ([], {'date_of_birth': 'date_of_birth', 'email': 'email', 'key_name': 'user_id', 'legal_name': 'legal_name', 'nick_name': 'nick_name'}), '(date_of_birth=date_of_birth, email=email, key_name=\n user_id, legal_name=legal_name, nick_name=nick_name)\n', (1729, 1838), False, 'from models import models\n'), ((2442, 2493), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.used_twice_question_id', '{}'], {}), '(self.used_twice_question_id, {})\n', (2460, 2493), False, 'from models import models\n'), ((2585, 2635), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.used_once_question_id', '{}'], {}), '(self.used_once_question_id, {})\n', (2603, 2635), False, 'from models import models\n'), ((2721, 2768), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.unused_question_id', '{}'], {}), '(self.unused_question_id, {})\n', (2739, 2768), False, 'from models import models\n'), ((2790, 2909), 'models.models.QuestionDAO.save_all', 'models.QuestionDAO.save_all', (['[self.used_twice_question_dto, self.used_once_question_dto, self.\n unused_question_dto]'], {}), '([self.used_twice_question_dto, self.\n used_once_question_dto, self.unused_question_dto])\n', (2817, 2909), False, 'from models import models\n'), ((4218, 4351), 'models.models.QuestionGroupDAO.save_all', 'models.QuestionGroupDAO.save_all', (['[self.first_question_group_dto, self.second_question_group_dto, self.\n third_question_group_dto]'], {}), '([self.first_question_group_dto, self.\n second_question_group_dto, self.third_question_group_dto])\n', (4250, 4351), False, 'from models import models\n'), ((5242, 5304), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""', 'user_id': '"""1"""', 'is_enrolled': '(True)'}), "(key_name='name', user_id='1', is_enrolled=True)\n", (5256, 5304), False, 'from models import models\n'), ((5868, 5916), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""', 'user_id': 'user_id'}), "(key_name='name', user_id=user_id)\n", (5882, 5916), False, 'from models import models\n'), ((6828, 6889), 'models.models.Student', 'models.Student', ([], {'key_name': '"""[email protected]"""', 'user_id': 'user_id'}), "(key_name='[email protected]', user_id=user_id)\n", (6842, 6889), False, 'from models import models\n'), ((1342, 1390), 'models.models.EventEntity.safe_key', 'models.EventEntity.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (1369, 1390), False, 'from models import models\n'), ((4537, 4591), 'models.models.QuestionDAO.used_by', 'models.QuestionDAO.used_by', (['self.used_once_question_id'], {}), '(self.used_once_question_id)\n', (4563, 4591), False, 'from models import models\n'), ((4815, 4870), 'models.models.QuestionDAO.used_by', 'models.QuestionDAO.used_by', (['self.used_twice_question_id'], {}), '(self.used_twice_question_id)\n', (4841, 4870), False, 'from models import models\n'), ((4990, 5027), 'models.models.QuestionDAO.load', 'models.QuestionDAO.load', (['not_found_id'], {}), '(not_found_id)\n', (5013, 5027), False, 'from models import models\n'), ((5058, 5098), 'models.models.QuestionDAO.used_by', 
'models.QuestionDAO.used_by', (['not_found_id'], {}), '(not_found_id)\n', (5084, 5098), False, 'from models import models\n'), ((5698, 5742), 'models.models.Student.safe_key', 'models.Student.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (5721, 5742), False, 'from models import models\n'), ((6059, 6090), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""'}), "(key_name='name')\n", (6073, 6090), False, 'from models import models\n'), ((6350, 6381), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""'}), "(key_name='name')\n", (6364, 6381), False, 'from models import models\n'), ((6983, 7042), 'models.models.StudentPropertyEntity.create', 'models.StudentPropertyEntity.create', (['student', 'property_name'], {}), '(student, property_name)\n', (7018, 7042), False, 'from models import models\n'), ((6167, 6211), 'models.models.Student.safe_key', 'models.Student.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (6190, 6211), False, 'from models import models\n'), ((6569, 6634), 'models.models.StudentAnswersEntity.safe_key', 'models.StudentAnswersEntity.safe_key', (['answers_key', 'self.transform'], {}), '(answers_key, self.transform)\n', (6605, 6634), False, 'from models import models\n'), ((7160, 7235), 'models.models.StudentPropertyEntity.safe_key', 'models.StudentPropertyEntity.safe_key', (['student_property_key', 'self.transform'], {}), '(student_property_key, self.transform)\n', (7197, 7235), False, 'from models import models\n')] |
iseessel/audio | torchaudio/functional/functional.py | 64551a69186d28db1f499ba373f1b19c6a7ed894 | # -*- coding: utf-8 -*-
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
"spectrogram",
"griffinlim",
"amplitude_to_DB",
"DB_to_amplitude",
"compute_deltas",
"compute_kaldi_pitch",
"create_fb_matrix",
"create_dct",
"compute_deltas",
"detect_pitch_frequency",
"DB_to_amplitude",
"mu_law_encoding",
"mu_law_decoding",
"complex_norm",
"angle",
"magphase",
"phase_vocoder",
'mask_along_axis',
'mask_along_axis_iid',
'sliding_window_cmn',
"spectral_centroid",
"apply_codec",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
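    Example
        Illustrative only; the waveform shape and STFT parameters below are
        arbitrary assumptions, not values required by this function.
        >>> waveform = torch.randn(1, 16000)   # hypothetical 1 second mono clip
        >>> window = torch.hann_window(400)
        >>> spec = spectrogram(waveform, pad=0, window=window, n_fft=400,
        ...                    hop_length=200, win_length=400, power=2.0,
        ...                    normalized=False)
        >>> spec.shape   # (..., n_fft // 2 + 1, n_frame)
        torch.Size([1, 201, 81])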
"""
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return torch.view_as_real(spec_f)
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
normalized: bool,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from `librosa`.
* [1] McFee, Brian, Colin Raffel, Dawen Liang, Daniel PW Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto.
"librosa: Audio and music signal analysis in python."
In Proceedings of the 14th python in science conference, pp. 18-25. 2015.
* [2] Perraudin, N., Balazs, P., & Søndergaard, P. L.
"A fast Griffin-Lim algorithm,"
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),
Oct. 2013.
* [3] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
        hop_length (int): Length of hop between STFT windows. (Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
normalized (bool): Whether to normalize by magnitude after stft.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
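    Example
        A minimal sketch of inverting a magnitude spectrogram; the shapes,
        iteration count and momentum below are arbitrary assumptions.
        >>> window = torch.hann_window(400)
        >>> specgram = torch.rand(1, 201, 50)   # hypothetical magnitude STFT, freq = n_fft // 2 + 1
        >>> waveform = griffinlim(specgram, window, n_fft=400, hop_length=200,
        ...                       win_length=400, power=2.0, normalized=False,
        ...                       n_iter=32, momentum=0.99, length=None,
        ...                       rand_init=True)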
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
if normalized:
warnings.warn(
"The argument normalized is not used in Griffin-Lim, "
"and will be removed in v0.9.0 release. To suppress this warning, "
"please use `normalized=False`.")
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# randomly initialize the phase
batch, freq, frames = specgram.size()
if rand_init:
angles = 2 * math.pi * torch.rand(batch, freq, frames)
else:
angles = torch.zeros(batch, freq, frames)
angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \
.to(dtype=specgram.dtype, device=specgram.device)
specgram = specgram.unsqueeze(-1).expand_as(angles)
# And initialize the previous iterate to 0
rebuilt = torch.tensor(0.)
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length).float()
# Rebuild the spectrogram
rebuilt = torch.view_as_real(
torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
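    Example
        Illustrative only; the spectrogram values and ``top_db`` choice are
        arbitrary assumptions.
        >>> specgram = torch.rand(1, 201, 100) ** 2   # hypothetical power spectrogram
        >>> specgram_db = amplitude_to_DB(specgram, multiplier=10., amin=1e-10,
        ...                                db_multiplier=0., top_db=80.)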
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
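    Example
        Illustrative round trip with ``amplitude_to_DB``; the input values are
        arbitrary assumptions.
        >>> specgram = torch.rand(1, 201, 100) ** 2
        >>> specgram_db = amplitude_to_DB(specgram, multiplier=10., amin=1e-10,
        ...                                db_multiplier=0., top_db=None)
        >>> recovered = DB_to_amplitude(specgram_db, ref=1., power=1.)   # approximately specgram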
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
        freq (float): Frequency in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
        i.e. the number of frequencies to highlight/apply by the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
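    Example
        A sketch of converting a linear spectrogram to a mel spectrogram; the
        sizes below are arbitrary assumptions.
        >>> n_fft, sample_rate = 400, 16000
        >>> fb = create_fb_matrix(n_fft // 2 + 1, 0., float(sample_rate // 2), 40, sample_rate)
        >>> specgram = torch.rand(1, n_fft // 2 + 1, 100)
        >>> mel_specgram = torch.matmul(specgram.transpose(-1, -2), fb).transpose(-1, -2)
        >>> mel_specgram.shape
        torch.Size([1, 40, 100])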
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
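    Example
        Illustrative use for turning mel features into MFCC-like coefficients;
        the sizes below are arbitrary assumptions.
        >>> dct_mat = create_dct(n_mfcc=13, n_mels=40, norm='ortho')
        >>> mel_specgram = torch.rand(1, 40, 100)   # hypothetical mel spectrogram
        >>> mfcc = torch.matmul(mel_specgram.transpose(-1, -2), dct_mat).transpose(-1, -2)
        >>> mfcc.shape
        torch.Size([1, 13, 100])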
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
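    Example
        A minimal sketch; the input below is an arbitrary signal already scaled
        to [-1, 1].
        >>> waveform = torch.linspace(-1, 1, steps=5)
        >>> encoded = mu_law_encoding(waveform, quantization_channels=256)   # int64 values in [0, 255]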
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
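    Example
        Illustrative round trip with ``mu_law_encoding``; the input is an
        arbitrary assumption.
        >>> waveform = torch.linspace(-1, 1, steps=5)
        >>> encoded = mu_law_encoding(waveform, quantization_channels=256)
        >>> decoded = mu_law_decoding(encoded, quantization_channels=256)   # approximately the original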
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
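    Example
        A small sketch on an arbitrary tensor in the ``(..., complex=2)`` layout.
        >>> complex_tensor = torch.randn(1, 201, 100, 2)   # hypothetical STFT output
        >>> mag, phase = magphase(complex_tensor, power=1.0)
        >>> mag.shape, phase.shape
        (torch.Size([1, 201, 100]), torch.Size([1, 201, 100]))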
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))
time_steps = torch.arange(0,
complex_specgrams.size(-2),
rate,
device=complex_specgrams.device,
dtype=complex_specgrams.dtype)
alphas = time_steps % 1.0
phase_0 = angle(complex_specgrams[..., :1, :])
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())
angle_0 = angle(complex_specgrams_0)
angle_1 = angle(complex_specgrams_1)
norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
real_stretch = mag * torch.cos(phase_acc)
imag_stretch = mag * torch.sin(phase_acc)
complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
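    Example
        Illustrative frequency masking applied independently per batch example;
        the batch of spectrograms below is an arbitrary assumption.
        >>> specgrams = torch.randn(4, 2, 201, 100)   # (batch, channel, freq, time)
        >>> masked = mask_along_axis_iid(specgrams, mask_param=27, mask_value=0., axis=2)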
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
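    Example
        Illustrative time masking; the spectrogram shape and ``mask_param`` are
        arbitrary assumptions.
        >>> specgram = torch.randn(1, 201, 100)   # (channel, freq, time)
        >>> masked = mask_along_axis(specgram, mask_param=30, mask_value=0., axis=2)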
"""
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
assert mask_end - mask_start < mask_param
if axis == 1:
specgram[:, mask_start:mask_end] = mask_value
elif axis == 2:
specgram[:, :, mask_start:mask_end] = mask_value
else:
raise ValueError('Only Frequency and Time masking are supported')
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` is the spectrogram coefficients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
Take value from first if bigger than a multiplicative factor of the second, elementwise.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
    Note: If the maximum over the first half of the lag range comes within a
    small factor (0.99) of the overall maximum, the smaller lag from the first
    half is preferred.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
# Find near enough max that is smallest
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
        waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
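    Example
        A minimal sketch; the synthetic tone below is an arbitrary stand-in for
        real speech.
        >>> sample_rate = 16000
        >>> t = torch.linspace(0, 1, sample_rate)
        >>> waveform = torch.sin(2 * math.pi * 220.0 * t).unsqueeze(0)   # 220 Hz tone
        >>> freq = detect_pitch_frequency(waveform, sample_rate)   # values close to 220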
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
def sliding_window_cmn(
waveform: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
        waveform (Tensor): Tensor of features of dimension (..., frame, feature)
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
        Tensor: Normalized features with the same dimension as the input, (..., frame, feature)
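    Example
        An illustrative call; the feature matrix below (frames x features) is an
        arbitrary assumption.
        >>> features = torch.randn(1, 300, 40)   # hypothetical MFCC-like features
        >>> normalized = sliding_window_cmn(features, cmn_window=600, center=True)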
"""
input_shape = waveform.shape
num_frames, num_feats = input_shape[-2:]
waveform = waveform.view(-1, num_frames, num_feats)
num_channels = waveform.shape[0]
dtype = waveform.dtype
device = waveform.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_waveform = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = waveform[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = waveform[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = waveform[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_waveform[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_waveform[:, t, :] *= variance
cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_waveform = cmn_waveform.squeeze(0)
return cmn_waveform
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
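    Example
        Illustrative only; the waveform and STFT parameters below are arbitrary
        assumptions.
        >>> sample_rate = 16000
        >>> waveform = torch.randn(1, sample_rate)
        >>> window = torch.hann_window(400)
        >>> centroid = spectral_centroid(waveform, sample_rate, pad=0, window=window,
        ...                              n_fft=400, hop_length=200, win_length=400)
        >>> centroid.shape   # (..., time)
        torch.Size([1, 81])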
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
@_mod_utils.requires_sox()
def apply_codec(
waveform: Tensor,
sample_rate: int,
format: str,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
) -> Tensor:
r"""
Apply codecs as a form of augmentation.
Args:
waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.
sample_rate (int): Sample rate of the audio waveform.
format (str): File format.
channels_first (bool):
When True, both the input and output Tensor have dimension ``[channel, time]``.
Otherwise, they have dimension ``[time, channel]``.
compression (float): Used for formats other than WAV.
            For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
encoding (str, optional): Changes the encoding for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
bits_per_sample (int, optional): Changes the bit depth for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
Returns:
torch.Tensor: Resulting Tensor.
If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``.
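    Example
        A hedged sketch; the waveform is arbitrary and the example assumes the
        sox backend (and an MP3 encoder) is available.
        >>> waveform = torch.rand(2, 16000) * 2 - 1   # hypothetical stereo clip in [-1, 1]
        >>> augmented = apply_codec(waveform, 16000, format="mp3")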
"""
bytes = io.BytesIO()
torchaudio.backend.sox_io_backend.save(bytes,
waveform,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample
)
bytes.seek(0)
augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(
bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format)
return augmented
@_mod_utils.requires_kaldi()
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in [1].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
            Cost factor for F0 change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
        delta_pitch (float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
simulate_first_pass_online (bool, optional):
If true, the function will output features that correspond to what an online decoder
would see in the first pass of decoding -- not the final version of the features,
which is the default. (default: False)
Relevant if ``frames_per_chunk > 0``.
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
        Tensor: Pitch feature. Shape: ``(batch, frames, 2)`` where the last dimension
corresponds to pitch and NCCF.
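    Example
        A minimal sketch; the random waveform below is an arbitrary stand-in for
        16 kHz speech.
        >>> sample_rate = 16000
        >>> waveform = torch.randn(1, sample_rate)
        >>> pitch_feature = compute_kaldi_pitch(waveform, sample_rate)   # (batch, frames, 2)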
Reference:
- A pitch extraction algorithm tuned for automatic speech recognition
P. Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S. Khudanpur
2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),
Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
| [((36149, 36174), 'torchaudio._internal.module_utils.requires_sox', '_mod_utils.requires_sox', ([], {}), '()\n', (36172, 36174), True, 'from torchaudio._internal import module_utils as _mod_utils\n'), ((38218, 38245), 'torchaudio._internal.module_utils.requires_kaldi', '_mod_utils.requires_kaldi', ([], {}), '()\n', (38243, 38245), True, 'from torchaudio._internal import module_utils as _mod_utils\n'), ((2825, 3027), 'torch.stft', 'torch.stft', ([], {'input': 'waveform', 'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'center': 'center', 'pad_mode': 'pad_mode', 'normalized': '(False)', 'onesided': 'onesided', 'return_complex': '(True)'}), '(input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=\n win_length, window=window, center=center, pad_mode=pad_mode, normalized\n =False, onesided=onesided, return_complex=True)\n', (2835, 3027), False, 'import torch\n'), ((3384, 3410), 'torch.view_as_real', 'torch.view_as_real', (['spec_f'], {}), '(spec_f)\n', (3402, 3410), False, 'import torch\n'), ((6728, 6745), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6740, 6745), False, 'import torch\n'), ((7946, 8069), 'torch.istft', 'torch.istft', (['(specgram * angles)'], {'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'length': 'length'}), '(specgram * angles, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window, length=length)\n', (7957, 8069), False, 'import torch\n'), ((13565, 13609), 'torch.linspace', 'torch.linspace', (['(0)', '(sample_rate // 2)', 'n_freqs'], {}), '(0, sample_rate // 2, n_freqs)\n', (13579, 13609), False, 'import torch\n'), ((13756, 13796), 'torch.linspace', 'torch.linspace', (['m_min', 'm_max', '(n_mels + 2)'], {}), '(m_min, m_max, n_mels + 2)\n', (13770, 13796), False, 'import torch\n'), ((14117, 14131), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (14128, 14131), False, 'import torch\n'), ((16495, 16526), 'torch.tensor', 'torch.tensor', (['mu'], {'dtype': 'x.dtype'}), '(mu, dtype=x.dtype)\n', (16507, 16526), False, 'import torch\n'), ((17321, 17355), 'torch.tensor', 'torch.tensor', (['mu'], {'dtype': 'x_mu.dtype'}), '(mu, dtype=x_mu.dtype)\n', (17333, 17355), False, 'import torch\n'), ((18295, 18354), 'torch.atan2', 'torch.atan2', (['complex_tensor[..., 1]', 'complex_tensor[..., 0]'], {}), '(complex_tensor[..., 1], complex_tensor[..., 0])\n', (18306, 18354), False, 'import torch\n'), ((20423, 20479), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['complex_specgrams', '[0, 0, 0, 2]'], {}), '(complex_specgrams, [0, 0, 0, 2])\n', (20446, 20479), False, 'import torch\n'), ((20770, 20814), 'torch.norm', 'torch.norm', (['complex_specgrams_0'], {'p': '(2)', 'dim': '(-1)'}), '(complex_specgrams_0, p=2, dim=-1)\n', (20780, 20814), False, 'import torch\n'), ((20828, 20872), 'torch.norm', 'torch.norm', (['complex_specgrams_1'], {'p': '(2)', 'dim': '(-1)'}), '(complex_specgrams_1, p=2, dim=-1)\n', (20838, 20872), False, 'import torch\n'), ((21062, 21109), 'torch.cat', 'torch.cat', (['[phase_0, phase[(...), :-1]]'], {'dim': '(-1)'}), '([phase_0, phase[(...), :-1]], dim=-1)\n', (21071, 21109), False, 'import torch\n'), ((21124, 21147), 'torch.cumsum', 'torch.cumsum', (['phase', '(-1)'], {}), '(phase, -1)\n', (21136, 21147), False, 'import torch\n'), ((21325, 21374), 'torch.stack', 'torch.stack', (['[real_stretch, imag_stretch]'], {'dim': '(-1)'}), '([real_stretch, imag_stretch], dim=-1)\n', (21336, 21374), False, 'import torch\n'), 
((25879, 25931), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['specgram', '(n, n)'], {'mode': 'mode'}), '(specgram, (n, n), mode=mode)\n', (25902, 25931), False, 'import torch\n'), ((27138, 27179), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['waveform', '(0, p)'], {}), '(waveform, (0, p))\n', (27161, 27179), False, 'import torch\n'), ((27697, 27722), 'torch.cat', 'torch.cat', (['output_lag', '(-1)'], {}), '(output_lag, -1)\n', (27706, 27722), False, 'import torch\n'), ((28603, 28639), 'torch.max', 'torch.max', (['nccf[(...), lag_min:]', '(-1)'], {}), '(nccf[(...), lag_min:], -1)\n', (28612, 28639), False, 'import torch\n'), ((28686, 28731), 'torch.max', 'torch.max', (['nccf[(...), lag_min:half_size]', '(-1)'], {}), '(nccf[(...), lag_min:half_size], -1)\n', (28695, 28731), False, 'import torch\n'), ((29211, 29288), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['indices', '(pad_length, 0)'], {'mode': '"""constant"""', 'value': '(0.0)'}), "(indices, (pad_length, 0), mode='constant', value=0.0)\n", (29234, 29288), False, 'import torch\n'), ((29470, 29492), 'torch.median', 'torch.median', (['roll', '(-1)'], {}), '(roll, -1)\n', (29482, 29492), False, 'import torch\n'), ((32333, 32397), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (32344, 32397), False, 'import torch\n'), ((32414, 32478), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (32425, 32478), False, 'import torch\n'), ((32498, 32574), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_frames', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_frames, num_feats, dtype=dtype, device=device)\n', (32509, 32574), False, 'import torch\n'), ((37508, 37520), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (37518, 37520), False, 'import io\n'), ((37525, 37661), 'torchaudio.backend.sox_io_backend.save', 'torchaudio.backend.sox_io_backend.save', (['bytes', 'waveform', 'sample_rate', 'channels_first', 'compression', 'format', 'encoding', 'bits_per_sample'], {}), '(bytes, waveform, sample_rate,\n channels_first, compression, format, encoding, bits_per_sample)\n', (37563, 37661), False, 'import torchaudio\n'), ((38040, 38189), 'torchaudio.sox_effects.sox_effects.apply_effects_file', 'torchaudio.sox_effects.sox_effects.apply_effects_file', (['bytes'], {'effects': "[['rate', f'{sample_rate}']]", 'channels_first': 'channels_first', 'format': 'format'}), "(bytes, effects=[[\n 'rate', f'{sample_rate}']], channels_first=channels_first, format=format)\n", (38093, 38189), False, 'import torchaudio\n'), ((42711, 43069), 'torch.ops.torchaudio.kaldi_ComputeKaldiPitch', 'torch.ops.torchaudio.kaldi_ComputeKaldiPitch', (['waveform', 'sample_rate', 'frame_length', 'frame_shift', 'min_f0', 'max_f0', 'soft_min_f0', 'penalty_factor', 'lowpass_cutoff', 'resample_frequency', 'delta_pitch', 'nccf_ballast', 'lowpass_filter_width', 'upsample_filter_width', 'max_frames_latency', 'frames_per_chunk', 'simulate_first_pass_online', 'recompute_frame', 'snip_edges'], {}), '(waveform, sample_rate,\n frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor,\n lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast,\n lowpass_filter_width, upsample_filter_width, max_frames_latency,\n frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges)\n', (42755, 43069), 
False, 'import torch\n'), ((2584, 2641), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['waveform', '(pad, pad)', '"""constant"""'], {}), "(waveform, (pad, pad), 'constant')\n", (2607, 2641), False, 'import torch\n'), ((5916, 6090), 'warnings.warn', 'warnings.warn', (['"""The argument normalized is not used in Griffin-Lim, and will be removed in v0.9.0 release. To suppress this warning, please use `normalized=False`."""'], {}), "(\n 'The argument normalized is not used in Griffin-Lim, and will be removed in v0.9.0 release. To suppress this warning, please use `normalized=False`.'\n )\n", (5929, 6090), False, 'import warnings\n'), ((6454, 6486), 'torch.zeros', 'torch.zeros', (['batch', 'freq', 'frames'], {}), '(batch, freq, frames)\n', (6465, 6486), False, 'import torch\n'), ((11164, 11177), 'math.log', 'math.log', (['(6.4)'], {}), '(6.4)\n', (11172, 11177), False, 'import math\n'), ((12037, 12050), 'math.log', 'math.log', (['(6.4)'], {}), '(6.4)\n', (12045, 12050), False, 'import math\n'), ((12125, 12173), 'torch.exp', 'torch.exp', (['(logstep * (mels[log_t] - min_log_mel))'], {}), '(logstep * (mels[log_t] - min_log_mel))\n', (12134, 12173), False, 'import torch\n'), ((14298, 14331), 'torch.min', 'torch.min', (['down_slopes', 'up_slopes'], {}), '(down_slopes, up_slopes)\n', (14307, 14331), False, 'import torch\n'), ((14604, 14795), 'warnings.warn', 'warnings.warn', (['f"""At least one mel filterbank has all zero values. The value for `n_mels` ({n_mels}) may be set too high. Or, the value for `n_freqs` ({n_freqs}) may be set too low."""'], {}), "(\n f'At least one mel filterbank has all zero values. The value for `n_mels` ({n_mels}) may be set too high. Or, the value for `n_freqs` ({n_freqs}) may be set too low.'\n )\n", (14617, 14795), False, 'import warnings\n'), ((16587, 16602), 'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', (16598, 16602), False, 'import torch\n'), ((21225, 21245), 'torch.cos', 'torch.cos', (['phase_acc'], {}), '(phase_acc)\n', (21234, 21245), False, 'import torch\n'), ((21271, 21291), 'torch.sin', 'torch.sin', (['phase_acc'], {}), '(phase_acc)\n', (21280, 21291), False, 'import torch\n'), ((22495, 22554), 'torch.rand', 'torch.rand', (['specgrams.shape[:2]'], {'device': 'device', 'dtype': 'dtype'}), '(specgrams.shape[:2], device=device, dtype=dtype)\n', (22505, 22554), False, 'import torch\n'), ((22584, 22643), 'torch.rand', 'torch.rand', (['specgrams.shape[:2]'], {'device': 'device', 'dtype': 'dtype'}), '(specgrams.shape[:2], device=device, dtype=dtype)\n', (22594, 22643), False, 'import torch\n'), ((24023, 24036), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (24033, 24036), False, 'import torch\n'), ((24066, 24079), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (24076, 24079), False, 'import torch\n'), ((26047, 26117), 'torch.nn.functional.conv1d', 'torch.nn.functional.conv1d', (['specgram', 'kernel'], {'groups': 'specgram.shape[1]'}), '(specgram, kernel, groups=specgram.shape[1])\n', (26073, 26117), False, 'import torch\n'), ((26860, 26893), 'math.ceil', 'math.ceil', (['(sample_rate / freq_low)'], {}), '(sample_rate / freq_low)\n', (26869, 26893), False, 'import math\n'), ((26917, 26952), 'math.ceil', 'math.ceil', (['(sample_rate * frame_time)'], {}), '(sample_rate * frame_time)\n', (26926, 26952), False, 'import math\n'), ((27021, 27060), 'math.ceil', 'math.ceil', (['(waveform_length / frame_size)'], {}), '(waveform_length / frame_size)\n', (27030, 27060), False, 'import math\n'), ((28510, 28544), 'math.ceil', 'math.ceil', 
(['(sample_rate / freq_high)'], {}), '(sample_rate / freq_high)\n', (28519, 28544), False, 'import math\n'), ((6395, 6426), 'torch.rand', 'torch.rand', (['batch', 'freq', 'frames'], {}), '(batch, freq, frames)\n', (6405, 6426), False, 'import torch\n'), ((7274, 7470), 'torch.stft', 'torch.stft', ([], {'input': 'inverse', 'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'center': '(True)', 'pad_mode': '"""reflect"""', 'normalized': '(False)', 'onesided': '(True)', 'return_complex': '(True)'}), "(input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=\n win_length, window=window, center=True, pad_mode='reflect', normalized=\n False, onesided=True, return_complex=True)\n", (7284, 7470), False, 'import torch\n'), ((9427, 9451), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'amin'}), '(x, min=amin)\n', (9438, 9451), False, 'import torch\n'), ((10411, 10435), 'torch.pow', 'torch.pow', (['(10.0)', '(0.1 * x)'], {}), '(10.0, 0.1 * x)\n', (10420, 10435), False, 'import torch\n'), ((10911, 10941), 'math.log10', 'math.log10', (['(1.0 + freq / 700.0)'], {}), '(1.0 + freq / 700.0)\n', (10921, 10941), False, 'import math\n'), ((15743, 15757), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (15752, 15757), False, 'import math\n'), ((16538, 16551), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (16548, 16551), False, 'import torch\n'), ((17396, 17409), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (17406, 17409), False, 'import torch\n'), ((20954, 20988), 'torch.round', 'torch.round', (['(phase / (2 * math.pi))'], {}), '(phase / (2 * math.pi))\n', (20965, 20988), False, 'import torch\n'), ((25946, 26000), 'torch.arange', 'torch.arange', (['(-n)', '(n + 1)', '(1)'], {'device': 'device', 'dtype': 'dtype'}), '(-n, n + 1, 1, device=device, dtype=dtype)\n', (25958, 26000), False, 'import torch\n'), ((33407, 33431), 'torch.sum', 'torch.sum', (['input_part', '(1)'], {}), '(input_part, 1)\n', (33416, 33431), False, 'import torch\n'), ((35925, 36011), 'torch.linspace', 'torch.linspace', (['(0)', '(sample_rate // 2)'], {'steps': '(1 + n_fft // 2)', 'device': 'specgram.device'}), '(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.\n device)\n', (35939, 36011), False, 'import torch\n'), ((6911, 7034), 'torch.istft', 'torch.istft', (['(specgram * angles)'], {'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'length': 'length'}), '(specgram * angles, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window, length=length)\n', (6922, 7034), False, 'import torch\n'), ((11242, 11269), 'math.log', 'math.log', (['(freq / min_log_hz)'], {}), '(freq / min_log_hz)\n', (11250, 11269), False, 'import math\n'), ((34322, 34386), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (34333, 34386), False, 'import torch\n'), ((34610, 34635), 'torch.pow', 'torch.pow', (['variance', '(-0.5)'], {}), '(variance, -0.5)\n', (34619, 34635), False, 'import torch\n'), ((16571, 16583), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (16580, 16583), False, 'import torch\n'), ((33487, 33519), 'torch.cumsum', 'torch.cumsum', (['(input_part ** 2)', '(1)'], {}), '(input_part ** 2, 1)\n', (33499, 33519), False, 'import torch\n'), ((17423, 17435), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (17432, 17435), False, 'import torch\n'), ((17438, 17453), 'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', 
(17449, 17453), False, 'import torch\n'), ((27583, 27610), 'torch.norm', 'torch.norm', (['s2'], {'p': '(2)', 'dim': '(-1)'}), '(s2, p=2, dim=-1)\n', (27593, 27610), False, 'import torch\n'), ((27522, 27549), 'torch.norm', 'torch.norm', (['s1'], {'p': '(2)', 'dim': '(-1)'}), '(s1, p=2, dim=-1)\n', (27532, 27549), False, 'import torch\n')] |
Faust-Wang/vswarm | src/status_node.py | d18ce643218c18ef1e762f40562104b2a0926ad7 | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function
import curses
import sys
from collections import deque
from datetime import datetime
import numpy as np
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import BatteryState, Image, NavSatFix
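# Maps the 'Fix type' integer reported by the mavros GPS diagnostics
# (MAVLink GPS_FIX_TYPE values 0-6) to a human-readable label and a curses
# colour used when rendering the status line.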
GPS_FIX_DICT = {
0: ('No GPS', curses.COLOR_RED),
1: ('No fix', curses.COLOR_RED),
2: ('2D lock', curses.COLOR_BLUE),
3: ('3D lock', curses.COLOR_BLUE),
4: ('DGPS', curses.COLOR_MAGENTA),
5: ('RTK float', curses.COLOR_YELLOW),
6: ('RTK fix', curses.COLOR_GREEN)
}
def get_color(color):
return curses.color_pair(color)
def frequency_from_messages(messages):
durations = []
for i in range(len(messages) - 1):
duration = messages[i + 1].header.stamp - messages[i].header.stamp
durations.append(duration.to_sec())
frequency = 1 / np.mean(durations)
if np.isnan(frequency):
return 0
return frequency
class StatusNode:
def __init__(self, screen):
rospy.init_node('status_node', argv=sys.argv)
self.rate = rospy.get_param('~rate', default=1.0)
# Curses setup
self.screen = curses.initscr()
self.rows, self.cols = self.screen.getmaxyx()
height_status = 15
self.status = curses.newwin(height_status, self.cols, 1, 2)
# self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)
self.lines = 0
self.text = ''
self.screen.keypad(True)
curses.curs_set(False) # Hide cursor
colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,
curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED,
curses.COLOR_WHITE, curses.COLOR_YELLOW]
# Curses color setup
curses.use_default_colors()
for color in colors:
curses.init_pair(color, color, -1)
# Default variables
self.status_battery_perc = None
self.state = State()
self.state_sub = rospy.Subscriber('mavros/state', State,
callback=self.state_callback,
queue_size=1)
self.battery = BatteryState()
self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,
callback=self.battery_callback,
queue_size=1)
self.extended = ExtendedState()
self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,
callback=self.extended_callback,
queue_size=1)
# self.statustext = StatusText()
# self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,
# callback=self.statustext_callback,
# queue_size=1)
self.gps = NavSatFix()
self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,
callback=self.gps_callback,
queue_size=1)
self.local_pose = PoseStamped()
self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,
callback=self.local_pose_callback,
queue_size=1)
self.global_pose = PoseStamped()
self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,
callback=self.global_pose_callback,
queue_size=1)
self.diagnostics = DiagnosticArray()
self.diagnostic_gps = DiagnosticStatus()
self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray,
callback=self.diagnostics_callback,
queue_size=1)
self.setpoint = PositionTarget()
self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
callback=self.setpoint_callback,
queue_size=1)
self.cameras = ['front', 'right', 'back', 'left']
self.image_subscribers = []
self.images = {c: deque(maxlen=10) for c in self.cameras}
for camera in self.cameras:
topic = f'camera_{camera}/image_raw'
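            # A large buff_size (16 MiB) is a common rospy idiom for image topics:
            # it keeps old frames from piling up in the socket buffer so the
            # callback always sees a recent message.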
subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback,
callback_args=camera, queue_size=1,
buff_size=2 ** 24)
self.image_subscribers.append(subscriber)
def battery_callback(self, battery_msg):
if battery_msg.location == 'id0':
self.battery = battery_msg
def state_callback(self, state_msg):
self.state = state_msg
def extended_callback(self, extended_msg):
self.extended = extended_msg
def diagnostics_callback(self, diagnostics_msg):
for status in diagnostics_msg.status:
if 'GPS' in status.name:
self.diagnostic_gps = status
def gps_callback(self, gps_msg):
self.gps = gps_msg
def local_pose_callback(self, pose_msg):
self.local_pose = pose_msg
def global_pose_callback(self, pose_msg):
self.global_pose = pose_msg
def setpoint_callback(self, setpoint_msg):
self.setpoint = setpoint_msg
def image_callback(self, image_msg, camera):
self.images[camera].append(image_msg)
def statustext_callback(self, statustext_msg):
screen = self.console
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# time_str = datetime.datetime.fromtimestamp(unix_time)
text = statustext_msg.text
severity = statustext_msg.severity
msg = statustext_msg
severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR]
severity_yellow = [msg.WARNING, msg.NOTICE]
severity_neutral = [msg.INFO, msg.DEBUG]
color = curses.COLOR_CYAN
if severity in severity_red:
color = curses.COLOR_RED
elif severity in severity_yellow:
color = curses.COLOR_YELLOW
elif severity in severity_neutral:
color = curses.COLOR_WHITE
self.text = f'{time_str}: {text} ({color})'
# screen.addstr(self.lines, 0, log, get_color(color))
self.lines += 1
screen.refresh()
def print_status(self):
screen = self.status
screen.clear()
# rospy.loginfo(status)
# print(status)
x_tab = 0
x_indent = 14
row = 0
# Battery
battery_percentage = int(self.battery.percentage * 100)
color = curses.COLOR_CYAN
if battery_percentage > 50:
color = curses.COLOR_GREEN
elif battery_percentage > 25:
color = curses.COLOR_YELLOW
elif battery_percentage > 0:
color = curses.COLOR_RED
status_battery = str(battery_percentage) + '%'
screen.addstr(row, x_tab, 'Battery: ')
screen.addstr(row, x_indent, status_battery, get_color(color))
row += 1
# Armed
if self.state.armed:
color = curses.COLOR_RED
status_armed = 'Yes'
else:
color = curses.COLOR_GREEN
status_armed = 'No'
screen.addstr(row, x_tab, 'Armed: ')
screen.addstr(row, x_indent, status_armed, get_color(color))
row += 1
# Mode
color = curses.COLOR_CYAN
mode = self.state.mode
if mode.startswith('AUTO'):
mode = mode.split('.')[-1]
mode = mode.capitalize()
if mode == 'Offboard':
color = curses.COLOR_RED
else:
color = curses.COLOR_BLUE
if mode == '':
mode = 'None'
elif mode == 'Posctl':
mode = 'Position'
elif mode == 'Rtl':
mode = 'Return'
status_mode = '{}'.format(mode)
screen.addstr(row, x_tab, 'Mode: ')
screen.addstr(row, x_indent, status_mode, get_color(color))
row += 1
# Extended status
if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR:
status_extended = 'Air'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:
status_extended = 'Landed'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:
status_extended = 'Ground'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:
status_extended = 'Takeoff'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:
status_extended = 'Undefined'
color = curses.COLOR_CYAN
screen.addstr(row, x_tab, 'State: ')
screen.addstr(row, x_indent, status_extended, get_color(color))
row += 1
# GPS info
satellites = 0
fix_type, color = GPS_FIX_DICT[0]
for value in self.diagnostic_gps.values:
if value.key == 'Satellites visible':
satellites = value.value
elif value.key == 'Fix type':
fix_type, color = GPS_FIX_DICT[int(value.value)]
screen.addstr(row, x_tab, 'GPS info: ')
screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))
row += 2
# GPS pos
latitude = self.gps.latitude
longitude = self.gps.longitude
altitude = round(self.gps.altitude, 2)
status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)'
screen.addstr(row, x_tab, 'GPS pos: ')
screen.addstr(row, x_indent, status_gps)
row += 1
# Local pose
p = self.local_pose.pose.position
q = self.local_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
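        # Before the first pose message arrives the quaternion is all zeros,
        # which scipy's Rotation.from_quat rejects; fall back to identity angles.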
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Local pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Global pose
p = self.global_pose.pose.position
q = self.global_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Global pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Setpoint
v = self.setpoint.velocity
vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2)
yaw = int(np.rad2deg(self.setpoint.yaw))
screen.addstr(row, x_tab, 'Setpoint: ')
screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')
row += 1
# Cameras
freqs = {c: 0 for c in self.cameras}
for cam, messages in self.images.items():
freqs[cam] = frequency_from_messages(messages)
ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()]
screen.addstr(row, x_tab, 'Cameras: ')
screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')
row += 1
screen.refresh()
self.screen.refresh()
def run(self):
rate = rospy.Rate(self.rate)
try:
while not rospy.is_shutdown():
self.print_status()
rate.sleep()
except rospy.ROSInterruptException:
curses.nocbreak()
self.screen.keypad(False)
curses.echo()
def curses_main(screen):
StatusNode(screen).run()
def main():
try:
curses.wrapper(curses_main)
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
| [((831, 855), 'curses.color_pair', 'curses.color_pair', (['color'], {}), '(color)\n', (848, 855), False, 'import curses\n'), ((1120, 1139), 'numpy.isnan', 'np.isnan', (['frequency'], {}), '(frequency)\n', (1128, 1139), True, 'import numpy as np\n'), ((1094, 1112), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (1101, 1112), True, 'import numpy as np\n'), ((1241, 1286), 'rospy.init_node', 'rospy.init_node', (['"""status_node"""'], {'argv': 'sys.argv'}), "('status_node', argv=sys.argv)\n", (1256, 1286), False, 'import rospy\n'), ((1308, 1345), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""'], {'default': '(1.0)'}), "('~rate', default=1.0)\n", (1323, 1345), False, 'import rospy\n'), ((1392, 1408), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (1406, 1408), False, 'import curses\n'), ((1514, 1559), 'curses.newwin', 'curses.newwin', (['height_status', 'self.cols', '(1)', '(2)'], {}), '(height_status, self.cols, 1, 2)\n', (1527, 1559), False, 'import curses\n'), ((1732, 1754), 'curses.curs_set', 'curses.curs_set', (['(False)'], {}), '(False)\n', (1747, 1754), False, 'import curses\n'), ((2022, 2049), 'curses.use_default_colors', 'curses.use_default_colors', ([], {}), '()\n', (2047, 2049), False, 'import curses\n'), ((2217, 2224), 'mavros_msgs.msg.State', 'State', ([], {}), '()\n', (2222, 2224), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2250, 2337), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/state"""', 'State'], {'callback': 'self.state_callback', 'queue_size': '(1)'}), "('mavros/state', State, callback=self.state_callback,\n queue_size=1)\n", (2266, 2337), False, 'import rospy\n'), ((2442, 2456), 'sensor_msgs.msg.BatteryState', 'BatteryState', ([], {}), '()\n', (2454, 2456), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((2484, 2583), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/battery"""', 'BatteryState'], {'callback': 'self.battery_callback', 'queue_size': '(1)'}), "('mavros/battery', BatteryState, callback=self.\n battery_callback, queue_size=1)\n", (2500, 2583), False, 'import rospy\n'), ((2692, 2707), 'mavros_msgs.msg.ExtendedState', 'ExtendedState', ([], {}), '()\n', (2705, 2707), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2736, 2844), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/extended_state"""', 'ExtendedState'], {'callback': 'self.extended_callback', 'queue_size': '(1)'}), "('mavros/extended_state', ExtendedState, callback=self.\n extended_callback, queue_size=1)\n", (2752, 2844), False, 'import rospy\n'), ((3226, 3237), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', ([], {}), '()\n', (3235, 3237), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((3261, 3369), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/global_position/raw/fix"""', 'NavSatFix'], {'callback': 'self.gps_callback', 'queue_size': '(1)'}), "('mavros/global_position/raw/fix', NavSatFix, callback=self\n .gps_callback, queue_size=1)\n", (3277, 3369), False, 'import rospy\n'), ((3472, 3485), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3483, 3485), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3516, 3629), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/local_position/pose"""', 'PoseStamped'], {'callback': 'self.local_pose_callback', 'queue_size': '(1)'}), "('mavros/local_position/pose', PoseStamped, callback=self.\n local_pose_callback, queue_size=1)\n", (3532, 3629), False, 'import rospy\n'), ((3747, 
3760), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3758, 3760), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3792, 3900), 'rospy.Subscriber', 'rospy.Subscriber', (['"""global_position/pose"""', 'PoseStamped'], {'callback': 'self.global_pose_callback', 'queue_size': '(1)'}), "('global_position/pose', PoseStamped, callback=self.\n global_pose_callback, queue_size=1)\n", (3808, 3900), False, 'import rospy\n'), ((4020, 4037), 'diagnostic_msgs.msg.DiagnosticArray', 'DiagnosticArray', ([], {}), '()\n', (4035, 4037), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4068, 4086), 'diagnostic_msgs.msg.DiagnosticStatus', 'DiagnosticStatus', ([], {}), '()\n', (4084, 4086), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4118, 4222), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/diagnostics"""', 'DiagnosticArray'], {'callback': 'self.diagnostics_callback', 'queue_size': '(1)'}), "('/diagnostics', DiagnosticArray, callback=self.\n diagnostics_callback, queue_size=1)\n", (4134, 4222), False, 'import rospy\n'), ((4339, 4355), 'mavros_msgs.msg.PositionTarget', 'PositionTarget', ([], {}), '()\n', (4353, 4355), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((4384, 4497), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/setpoint_raw/local"""', 'PositionTarget'], {'callback': 'self.setpoint_callback', 'queue_size': '(1)'}), "('mavros/setpoint_raw/local', PositionTarget, callback=self\n .setpoint_callback, queue_size=1)\n", (4400, 4497), False, 'import rospy\n'), ((12475, 12496), 'rospy.Rate', 'rospy.Rate', (['self.rate'], {}), '(self.rate)\n', (12485, 12496), False, 'import rospy\n'), ((12843, 12870), 'curses.wrapper', 'curses.wrapper', (['curses_main'], {}), '(curses_main)\n', (12857, 12870), False, 'import curses\n'), ((2091, 2125), 'curses.init_pair', 'curses.init_pair', (['color', 'color', '(-1)'], {}), '(color, color, -1)\n', (2107, 2125), False, 'import curses\n'), ((4704, 4720), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (4709, 4720), False, 'from collections import deque\n'), ((4854, 4974), 'rospy.Subscriber', 'rospy.Subscriber', (['topic', 'Image'], {'callback': 'self.image_callback', 'callback_args': 'camera', 'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), '(topic, Image, callback=self.image_callback, callback_args=\n camera, queue_size=1, buff_size=2 ** 24)\n', (4870, 4974), False, 'import rospy\n'), ((10572, 10595), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (10583, 10595), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11212, 11235), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (11223, 11235), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11806, 11835), 'numpy.rad2deg', 'np.rad2deg', (['self.setpoint.yaw'], {}), '(self.setpoint.yaw)\n', (11816, 11835), True, 'import numpy as np\n'), ((6087, 6101), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6099, 6101), False, 'from datetime import datetime\n'), ((10641, 10677), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", (10653, 10677), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11281, 11317), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", 
(11293, 11317), True, 'from scipy.spatial.transform import Rotation as R\n'), ((12532, 12551), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (12549, 12551), False, 'import rospy\n'), ((12674, 12691), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (12689, 12691), False, 'import curses\n'), ((12742, 12755), 'curses.echo', 'curses.echo', ([], {}), '()\n', (12753, 12755), False, 'import curses\n')] |
mo-schmid/MIALab | bin/boxplot_param.py | 8a7e183df7007993e8a28513a73dca20bfd60737 | import argparse
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
class ResultParam():
"""Result Parameter"""
def __init__(self, path: Path, param_str: str):
"""Initializes a new instance of the Result Parameter
Args:
path (Path): path to the desired result file
param_str (str): string containing the parameters used in the postprocessing
"""
self.path = path
self.param_str = param_str
def set_box_format(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['caps'], linewidth=1)
plt.setp(bp['medians'], color='red')
plt.setp(bp['medians'], linewidth=1.5)
plt.setp(bp['fliers'], marker='.')
plt.setp(bp['fliers'], markerfacecolor='black')
plt.setp(bp['fliers'], alpha=1)
def boxplot(file_path: str, data: list, title: str, x_label: str, y_label: str, x_ticks: tuple,
min_: float = None, max_: float = None):
if len(data) != len(x_ticks):
raise ValueError('arguments data and x_ticks need to have same length')
fig = plt.figure(
        figsize=(2 * 1.5, 5 * 1.5))  # figsize defaults to (width, height) = (6.4, 4.8);
    # for boxplots, we want the ratio to be inverted
ax = fig.add_subplot(111) # create an axes instance (nrows=ncols=index)
bp = ax.boxplot(data, widths=0.6)
set_box_format(bp, '000')
# set and format litle, labels, and ticks
ax.set_title(title, fontweight='bold', fontsize=20)
ax.set_ylabel(y_label, fontweight='bold', fontsize=18)
# ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the x-label since it should be clear from the x-ticks
ax.yaxis.set_tick_params(labelsize=12)
ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45)
# remove frame
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# thicken frame
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
# adjust min and max if provided
if min_ is not None or max_ is not None:
min_original, max_original = ax.get_ylim()
min_ = min_ if min_ is not None and min_ < min_original else min_original
max_ = max_ if max_ is not None and max_ > max_original else max_original
ax.set_ylim(min_, max_)
plt.savefig(file_path, bbox_inches="tight")
plt.close()
def format_data(data, label: str, metric: str):
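    # Select the metric column (e.g. DICE) for a single anatomical label from
    # the results DataFrame loaded out of a results.csv file.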
return data[data['LABEL'] == label][metric].values
def metric_to_readable_text(metric: str):
if metric == 'DICE':
return 'Dice coefficient'
elif metric == 'HDRFDST':
return 'Hausdorff distance (mm)'
else:
raise ValueError('Metric "{}" unknown'.format(metric))
def main(results: [ResultParam], plot_dir: Path):
"""generates box plots comparing two or more result sets for all labels
Args:
results ([ResultParam]): a list of result parameters (Path and description)
        plot_dir (Path): path to the desired result folder to store the box plots
"""
metrics = ('DICE', 'HDRFDST') # the metrics we want to plot the results for
metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits (min, max) for each metric. Use None if unknown
labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested in
# load the CSVs. We usually want to compare different methods (e.g. a set of different features), therefore,
    # we load two CSVs (for simplicity, it is the same here)
# todo: adapt to your needs to compare different methods (e.g. load different CSVs)
dfs = []
methods = []
for res in results:
dfs.append(pd.read_csv(res.path, sep=';'))
methods.append(res.param_str)
    # todo: read parameter values from text file, use them to plot the information about the parameter
# some parameters to improve the plot's readability
title = '{}'
for label in labels:
for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits):
boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)),
[format_data(df, label, metric) for df in dfs],
title.format(label),
'Method', metric_to_readable_text(metric),
methods,
min_, max_
)
if __name__ == '__main__':
results = []
    results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv"),
"no pp"))
    results.append(ResultParam(Path(Path.cwd() / "mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv"),
"with pp"))
main(results, Path(Path.cwd() / 'mia-result/plot_results'))
| [((45, 66), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (59, 66), False, 'import matplotlib\n'), ((580, 614), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes']"], {'color': 'color'}), "(bp['boxes'], color=color)\n", (588, 614), True, 'import matplotlib.pyplot as plt\n'), ((619, 656), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['whiskers']"], {'color': 'color'}), "(bp['whiskers'], color=color)\n", (627, 656), True, 'import matplotlib.pyplot as plt\n'), ((661, 694), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['caps']"], {'color': 'color'}), "(bp['caps'], color=color)\n", (669, 694), True, 'import matplotlib.pyplot as plt\n'), ((699, 732), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['caps']"], {'linewidth': '(1)'}), "(bp['caps'], linewidth=1)\n", (707, 732), True, 'import matplotlib.pyplot as plt\n'), ((737, 773), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['medians']"], {'color': '"""red"""'}), "(bp['medians'], color='red')\n", (745, 773), True, 'import matplotlib.pyplot as plt\n'), ((778, 816), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['medians']"], {'linewidth': '(1.5)'}), "(bp['medians'], linewidth=1.5)\n", (786, 816), True, 'import matplotlib.pyplot as plt\n'), ((821, 855), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'marker': '"""."""'}), "(bp['fliers'], marker='.')\n", (829, 855), True, 'import matplotlib.pyplot as plt\n'), ((860, 907), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'markerfacecolor': '"""black"""'}), "(bp['fliers'], markerfacecolor='black')\n", (868, 907), True, 'import matplotlib.pyplot as plt\n'), ((912, 943), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['fliers']"], {'alpha': '(1)'}), "(bp['fliers'], alpha=1)\n", (920, 943), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1258), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * 1.5, 5 * 1.5)'}), '(figsize=(2 * 1.5, 5 * 1.5))\n', (1230, 1258), True, 'import matplotlib.pyplot as plt\n'), ((2486, 2529), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_path'], {'bbox_inches': '"""tight"""'}), "(file_path, bbox_inches='tight')\n", (2497, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2534, 2545), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2543, 2545), True, 'import matplotlib.pyplot as plt\n'), ((3880, 3910), 'pandas.read_csv', 'pd.read_csv', (['res.path'], {'sep': '""";"""'}), "(res.path, sep=';')\n", (3891, 3910), True, 'import pandas as pd\n'), ((4969, 4979), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4977, 4979), False, 'from pathlib import Path\n'), ((4643, 4653), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4651, 4653), False, 'from pathlib import Path\n'), ((4801, 4811), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4809, 4811), False, 'from pathlib import Path\n')] |
NREL/EMeRGE | EMeRGE/dssmetrics/constants.py | 573e86ca8e62080c664998e8cc79e9231e7ad502 |
""" Default values : DO NOT CHANGE !!!"""
LOG_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
MAXITERATIONS = 100
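# Transformer thermal / loss-of-life parameters (temperatures in degrees
# Celsius); presumably an IEEE C57.91-style insulation-ageing formulation --
# the attribution is an assumption, the values are the project defaults.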
LIFE_PARAMETERS = {"theta_i":30,"theta_fl":36,"theta_gfl":28.6,
"R":4.87,"n":1,"tau":3.5,"m":1,"A":-13.391,
"B":6972.15,"num_of_iteration":4,}
DEFAULT_TEMP = 25
MAX_TRANS_LOADING = 1.5
DEFAULT_CONFIGURATION = {
"dss_filepath": "",
"dss_filename":"",
"extra_data_path": ".",
"export_folder":"",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-2-1 0:0:0",
"simulation_time_step (minute)": 15,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 96,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": False,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_folder": ".",
"log_filename":"logs.log",
"clear_old_log_file": True
}
}
DEFAULT_ADVANCED_CONFIGURATION = {
"project_path": "C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Projects",
"active_project":"GR_PALAYAM",
"active_scenario": "FullYear",
"dss_filename":"gr_palayam.dss",
"start_time":"2018-1-1 0:0:0",
"end_time":"2018-1-2 0:0:0",
"simulation_time_step (minute)": 60,
"frequency": 50,
"upper_voltage": 1.1,
"lower_voltage":0.9,
"record_every": 4,
"parallel_simulation":True,
"parallel_process": 1,
"export_voltages": False,
"export_lineloadings": False,
"export_transloadings":False,
"export_start_date": "",
"export_end_date": "",
"volt_var": {
"enabled": True,
"yarray": [0.44,0.44,0,0,-0.44,-0.44],
"xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
},
"log_settings": {
"save_in_file": False,
"log_filename":"",
"clear_old_log_file": True
}
}
VALID_SETTINGS = {
"project_path":{'type':str},
"active_project":{'type':str},
"active_scenario":{'type':str},
"dss_filepath": {'type': str},
"dss_filename":{'type':str},
"export_folder":{'type':str},
"start_time":{'type':str},
"end_time":{'type':str},
"simulation_time_step (minute)":{'type':int},
"frequency": {'type':int,'options':[50,60]},
"upper_voltage": {'type':float,'range':[1,1.5]},
"lower_voltage":{'type':float,'range':[0.8,1]},
"record_every": {'type':int},
"extra_data_path":{'type':str},
"parallel_simulation":{'type':bool},
"parallel_process": {'type':int,'range':[1,4]},
"export_voltages": {'type':bool},
"export_lineloadings": {'type':bool},
"export_transloadings":{'type':bool},
"export_start_date": {'type':str},
"export_end_date": {'type':str},
"volt_var": {
"enabled": {'type':bool},
"yarray": {'type':list},
"xarray": {'type':list}
},
"log_settings": {
"save_in_file": {'type':bool},
"log_folder": {'type':str},
"log_filename":{'type':str},
"clear_old_log_file": {'type':bool}
}
}
| [] |
MathisFederico/Minesweeper | minesweeper/game.py | b66b41066e325813b24497d2caca0a11c048e18b | try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
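    # Gym-style Minesweeper environment.
    # Observation: one value per cell -- revealed cells hold their neighbour-bomb
    # count (self.BOMB for a bomb), hidden cells map to self.BOMB + 1 and flagged
    # cells to self.BOMB + 2.
    # Action: (row, col, type) where type 0 reveals the cell and 1 toggles a flag.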
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
raise ValueError('Impact_size must be an odd number !')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
# Initalize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
def reveal_around(self, coords, reward, done, without_loss=False):
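        # "Chord" move: when a revealed number already has a matching count of
        # flags around it, open the remaining hidden neighbours (expanding through
        # zero cells recursively); opening an unflagged bomb ends the episode.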
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = np.sum(region[..., 1] == 2)
if flags_around == self.state[coords + (0,)]:
unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
if np.any(unrevealed_zeros_around):
zeros_coords = np.argwhere(unrevealed_zeros_around)
for zero in zeros_coords:
coord = (x_min + zero[0], y_min + zero[1])
self.state[coord + (1,)] = 1
self.reveal_around(coord, reward, done, without_loss=True)
self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1
unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
if np.any(unflagged_bombs_around):
self.done = True
reward, done = -1, True
else:
if not without_loss:
reward -= 0.001
def clip_index(self, x, axis):
max_idx = self.grid_shape[axis]
x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
return x_min, x_max, dx_min, dx_max
def step(self, action):
coords = action[:2]
action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag
case_state = self.state[coords + (1,)]
case_content = self.state[coords + (0,)]
NO_BOMBS_AROUND = 0
reward, done = 0, False
self.time_left = self.max_time - time() + self.start_time
if self.time_left <= 0:
score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs
reward, done = score, True
return self.get_observation(), reward, done, {'passed':False}
if action_type == self.REVEAL:
if case_state == self.HIDDEN:
self.state[coords + (1,)] = action_type
if case_content == self.BOMB:
if self.pygame_is_init: self.done = True
reward, done = -1, True
return self.get_observation(), reward, done, {'passed':False}
elif case_content == NO_BOMBS_AROUND:
self.reveal_around(coords, reward, done)
elif case_state == self.REVEAL:
self.reveal_around(coords, reward, done)
reward -= 0.01
else:
reward -= 0.001
self.score += reward
return self.get_observation(), reward, done, {'passed':True}
elif action_type == self.FLAG:
if case_state == self.REVEAL:
reward -= 0.001
else:
flaging = 1
if case_state == self.FLAG:
flaging = -1
self.state[coords + (1,)] = self.HIDDEN
else:
self.state[coords + (1,)] = self.FLAG
if case_content == self.BOMB:
self.flaged_bombs += flaging
else:
self.flaged_empty += flaging
if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
reward, done = 2 + self.time_left/self.max_time, True
if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done:
reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True
self.score += reward
return self.get_observation(), reward, done, {'passed':False}
def reset(self):
self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
return self.get_observation()
def render(self):
if not self.pygame_is_init:
self._init_pygame()
self.pygame_is_init = True
for event in pygame.event.get():
if event.type == pygame.QUIT: # pylint: disable=E1101
pygame.quit() # pylint: disable=E1101
# Plot background
pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))
# Plot grid
for index, state in np.ndenumerate(self.state[..., 1]):
self._plot_block(index, state)
# Plot infos
## Score
score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))
self.window.blit(score, (0.1*self.header_size, 0.8*self.width))
## Time left
time_text = self.num_font.render("TIME", 1, (255, 10, 10))
self.time_left = self.max_time - time() + self.start_time
time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))
self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))
self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))
## Bombs left
bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))
self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))
self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width))
pygame.display.flip()
pygame.time.wait(10)
if self.done:
pygame.time.wait(3000)
@staticmethod
def _get_color(n, max_n):
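        # Interpolate the hue from blue (low neighbour counts) towards red
        # (high counts) on a cubic scale, then convert HSV -> RGB for pygame.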
BLUE_HUE = 0.6
RED_HUE = 0.0
HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3
color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
return color
def _plot_block(self, index, state):
position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
label = None
if state == self.HIDDEN and not self.done:
img_key = 'hidden'
elif state == self.FLAG:
if not self.done:
img_key = 'flag'
else:
content = self.state[index][0]
if content == self.BOMB:
img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
else:
img_key = 'misplaced_flag'
else:
content = self.state[index][0]
if content == self.BOMB:
if state == self.HIDDEN:
img_key = 'mine' if not self.chicken else 'chicken'
else:
img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
else:
img_key = 'revealed'
label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))
self.window.blit(self.images[img_key], position)
if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)
def _init_pygame(self):
pygame.init() # pylint: disable=E1101
# Open Pygame window
self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])
self.BLOCK_SIZE = 32
self.header_size = self.scale_factor * 100
self.origin = np.array([self.header_size, 0])
self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
self.window = pygame.display.set_mode((self.height, self.width))
# Setup font for numbers
num_font_size = 20
self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0])
self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12))
# Load images
def scale_image(img, scale_factor=self.scale_factor):
return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height())))
images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag']
if self.chicken:
images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']
else:
images_names += ['mine', 'exploded_mine', 'disabled_mine']
self.images = {}
for img_name in images_names:
with pkg_resources.path(images, img_name + '.png') as path:
img = pygame.image.load(str(path)).convert()
self.images[img_name] = scale_image(img)
| [((567, 586), 'numpy.prod', 'np.prod', (['grid_shape'], {}), '(grid_shape)\n', (574, 586), True, 'import numpy as np\n'), ((1274, 1312), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_observation'], {}), '(nvec_observation)\n', (1294, 1312), False, 'from gym import Env, spaces\n'), ((1336, 1368), 'numpy.array', 'np.array', (['(self.grid_shape + (2,))'], {}), '(self.grid_shape + (2,))\n', (1344, 1368), True, 'import numpy as np\n'), ((1397, 1430), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_action'], {}), '(nvec_action)\n', (1417, 1430), False, 'from gym import Env, spaces\n'), ((1479, 1527), 'numpy.zeros', 'np.zeros', (['(self.grid_shape + (2,))'], {'dtype': 'np.uint8'}), '(self.grid_shape + (2,), dtype=np.uint8)\n', (1487, 1527), True, 'import numpy as np\n'), ((1882, 1943), 'numpy.ones', 'np.ones', (['(self.impact_size, self.impact_size)'], {'dtype': 'np.uint8'}), '((self.impact_size, self.impact_size), dtype=np.uint8)\n', (1889, 1943), True, 'import numpy as np\n'), ((2428, 2434), 'time.time', 'time', ([], {}), '()\n', (2432, 2434), False, 'from time import time\n'), ((2687, 2714), 'copy.copy', 'copy', (['self.state[:, :, (1)]'], {}), '(self.state[:, :, (1)])\n', (2691, 2714), False, 'from copy import copy\n'), ((2866, 2903), 'copy.copy', 'copy', (['self.state[:, :, (0)][revealed]'], {}), '(self.state[:, :, (0)][revealed])\n', (2870, 2903), False, 'from copy import copy\n'), ((7579, 7597), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7595, 7597), False, 'import pygame\n'), ((7762, 7838), 'pygame.draw.rect', 'pygame.draw.rect', (['self.window', '(60, 56, 53)', '(0, 0, self.height, self.width)'], {}), '(self.window, (60, 56, 53), (0, 0, self.height, self.width))\n', (7778, 7838), False, 'import pygame\n'), ((7888, 7922), 'numpy.ndenumerate', 'np.ndenumerate', (['self.state[..., 1]'], {}), '(self.state[..., 1])\n', (7902, 7922), True, 'import numpy as np\n'), ((9316, 9337), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9335, 9337), False, 'import pygame\n'), ((9346, 9366), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (9362, 9366), False, 'import pygame\n'), ((10977, 10990), 'pygame.init', 'pygame.init', ([], {}), '()\n', (10988, 10990), False, 'import pygame\n'), ((11233, 11264), 'numpy.array', 'np.array', (['[self.header_size, 0]'], {}), '([self.header_size, 0])\n', (11241, 11264), True, 'import numpy as np\n'), ((11474, 11524), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (11497, 11524), False, 'import pygame\n'), ((1216, 1240), 'numpy.ones', 'np.ones', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1223, 1240), True, 'import numpy as np\n'), ((3272, 3299), 'numpy.sum', 'np.sum', (['(region[..., 1] == 0)'], {}), '(region[..., 1] == 0)\n', (3278, 3299), True, 'import numpy as np\n'), ((3460, 3487), 'numpy.sum', 'np.sum', (['(region[..., 1] == 2)'], {}), '(region[..., 1] == 2)\n', (3466, 3487), True, 'import numpy as np\n'), ((9401, 9423), 'pygame.time.wait', 'pygame.time.wait', (['(3000)'], {}), '(3000)\n', (9417, 9423), False, 'import pygame\n'), ((11748, 11771), 'numpy.array', 'np.array', (['[0.325, 0.15]'], {}), '([0.325, 0.15])\n', (11756, 11771), True, 'import numpy as np\n'), ((11845, 11865), 'numpy.array', 'np.array', (['[0.225, 0]'], {}), '([0.225, 0])\n', (11853, 11865), True, 'import numpy as np\n'), ((1573, 1600), 'numpy.indices', 'np.indices', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1583, 1600), True, 
'import numpy as np\n'), ((2464, 2470), 'time.time', 'time', ([], {}), '()\n', (2468, 2470), False, 'from time import time\n'), ((3588, 3654), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == 0)', '(region[..., 1] == self.HIDDEN)'], {}), '(region[..., 0] == 0, region[..., 1] == self.HIDDEN)\n', (3602, 3654), True, 'import numpy as np\n'), ((3674, 3705), 'numpy.any', 'np.any', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3680, 3705), True, 'import numpy as np\n'), ((4183, 4255), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == self.BOMB)', '(region[..., 1] != self.FLAG)'], {}), '(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)\n', (4197, 4255), True, 'import numpy as np\n'), ((4275, 4305), 'numpy.any', 'np.any', (['unflagged_bombs_around'], {}), '(unflagged_bombs_around)\n', (4281, 4305), True, 'import numpy as np\n'), ((5152, 5158), 'time.time', 'time', ([], {}), '()\n', (5156, 5158), False, 'from time import time\n'), ((6920, 6984), 'numpy.logical_and', 'np.logical_and', (['(self.state[..., 0] == 9)', '(self.state[..., 1] == 1)'], {}), '(self.state[..., 0] == 9, self.state[..., 1] == 1)\n', (6934, 6984), True, 'import numpy as np\n'), ((7681, 7694), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7692, 7694), False, 'import pygame\n'), ((8451, 8457), 'time.time', 'time', ([], {}), '()\n', (8455, 8457), False, 'from time import time\n'), ((9625, 9657), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['HUE', '(1)', '(0.7)'], {}), '(HUE, 1, 0.7)\n', (9644, 9657), False, 'import colorsys\n'), ((12495, 12540), 'importlib_resources.path', 'pkg_resources.path', (['images', "(img_name + '.png')"], {}), "(images, img_name + '.png')\n", (12513, 12540), True, 'import importlib_resources as pkg_resources\n'), ((3742, 3778), 'numpy.argwhere', 'np.argwhere', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3753, 3778), True, 'import numpy as np\n'), ((9799, 9829), 'numpy.array', 'np.array', (['(index[1], index[0])'], {}), '((index[1], index[0]))\n', (9807, 9829), True, 'import numpy as np\n')] |
BReduardokramer/gaia | tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py | c00302cdcd435ab193e8365917cfc6abac9e4f2e | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.lockscreen.app import LockScreen
class TestCameraUnlockWithPasscode(GaiaTestCase):
# Input data
_input_passcode = '7931'
def setUp(self):
GaiaTestCase.setUp(self)
# Turn off geolocation prompt
self.apps.set_permission('System', 'geolocation', 'deny')
self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode)
self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True)
# this time we need it locked!
self.lockscreen.lock()
self.lock_screen = LockScreen(self.marionette)
def test_unlock_to_camera_with_passcode(self):
# https://github.com/mozilla/gaia-ui-tests/issues/479
camera = self.lock_screen.unlock_to_camera()
self.lock_screen.wait_for_lockscreen_not_visible()
camera.switch_to_camera_frame()
self.assertFalse(camera.is_gallery_button_visible)
camera.tap_switch_source()
camera.wait_for_capture_ready()
self.assertFalse(camera.is_gallery_button_visible)
| [((415, 439), 'gaiatest.GaiaTestCase.setUp', 'GaiaTestCase.setUp', (['self'], {}), '(self)\n', (433, 439), False, 'from gaiatest import GaiaTestCase\n'), ((813, 840), 'gaiatest.apps.lockscreen.app.LockScreen', 'LockScreen', (['self.marionette'], {}), '(self.marionette)\n', (823, 840), False, 'from gaiatest.apps.lockscreen.app import LockScreen\n')] |
CallumMcMahon/MeshCNN | models/layers/mesh_conv.py | 343950a8d69807ed4afa13f1843edb37c4cd042c | import torch
import torch.nn as nn
import torch.nn.functional as F
class MeshConv(nn.Module):
""" Computes convolution between edges and 4 incident (1-ring) edge neighbors
in the forward pass takes:
x: edge features (Batch x Features x Edges)
mesh: list of mesh data-structure (len(mesh) == Batch)
and applies convolution
"""
def __init__(self, in_channels, out_channels, k=5, bias=True):
super(MeshConv, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias)
self.k = k
def forward(self, x, mesh):
x = x.squeeze(-1)
# pad gemm
G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0)
# build 'neighborhood image' and apply convolution
G = self.create_GeMM(x, G)
x = self.conv(G)
return x
def flatten_gemm_inds(self, Gi):
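        # Turn per-mesh neighbour indices into indices over the flattened
        # (batch * edges) feature matrix so one index_select gathers every ring.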
(b, ne, nn) = Gi.shape
ne += 1
batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne)
add_fac = batch_n * ne
add_fac = add_fac.view(b, ne, 1)
add_fac = add_fac.repeat(1, 1, nn)
# flatten Gi
Gi = Gi.float() + add_fac[:, 1:, :]
return Gi
def create_GeMM(self, x, Gi):
""" gathers the edge features (x) with from the 1-ring indices (Gi)
applys symmetric functions to handle order invariance
returns a 'fake image' which can use 2d convolution on
output dimensions: Batch x Channels x Edges x 5
"""
Gishape = Gi.shape
# pad the first row of every sample in batch with zeros
padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)
# add zero feature vector then shift all indices. border edges now reference zero vector
x = torch.cat((padding, x), dim=2)
Gi = Gi + 1 #shift
# first flatten indices
Gi_flat = self.flatten_gemm_inds(Gi)
Gi_flat = Gi_flat.view(-1).long()
#
odim = x.shape
x = x.permute(0, 2, 1).contiguous()
x = x.view(odim[0] * odim[2], odim[1])
# indices of gemm never reference padded section of x so padded section never used
f = torch.index_select(x, dim=0, index=Gi_flat)
f = f.view(Gishape[0], Gishape[1], Gishape[2], -1)
f = f.permute(0, 3, 1, 2)
# apply the symmetric functions for an equivariant convolution
x_1 = f[:, :, :, 1] + f[:, :, :, 3]
x_2 = f[:, :, :, 2] + f[:, :, :, 4]
x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :, 3])
x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :, 4])
f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3)
return f
def pad_gemm(self, m, xsz, device):
""" extracts one-ring neighbors (4x) -> m.gemm_edges
which is of size #edges x 4
add the edge_id itself to make #edges x 5
then pad to desired size e.g., xsz x 5
"""
padded_gemm = torch.tensor(m.gemm_edges, device=device).float()
padded_gemm = padded_gemm.requires_grad_()
padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1)
# pad using F
padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), "constant", 0)
padded_gemm = padded_gemm.unsqueeze(0)
return padded_gemm | [((479, 576), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(1, k)', 'bias': 'bias'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=(\n 1, k), bias=bias)\n', (488, 576), True, 'import torch.nn as nn\n'), ((1678, 1755), 'torch.zeros', 'torch.zeros', (['(x.shape[0], x.shape[1], 1)'], {'requires_grad': '(True)', 'device': 'x.device'}), '((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)\n', (1689, 1755), False, 'import torch\n'), ((1865, 1895), 'torch.cat', 'torch.cat', (['(padding, x)'], {'dim': '(2)'}), '((padding, x), dim=2)\n', (1874, 1895), False, 'import torch\n'), ((2271, 2314), 'torch.index_select', 'torch.index_select', (['x'], {'dim': '(0)', 'index': 'Gi_flat'}), '(x, dim=0, index=Gi_flat)\n', (2289, 2314), False, 'import torch\n'), ((2582, 2626), 'torch.abs', 'torch.abs', (['(f[:, :, :, (1)] - f[:, :, :, (3)])'], {}), '(f[:, :, :, (1)] - f[:, :, :, (3)])\n', (2591, 2626), False, 'import torch\n'), ((2637, 2681), 'torch.abs', 'torch.abs', (['(f[:, :, :, (2)] - f[:, :, :, (4)])'], {}), '(f[:, :, :, (2)] - f[:, :, :, (4)])\n', (2646, 2681), False, 'import torch\n'), ((2690, 2747), 'torch.stack', 'torch.stack', (['[f[:, :, :, (0)], x_1, x_2, x_3, x_4]'], {'dim': '(3)'}), '([f[:, :, :, (0)], x_1, x_2, x_3, x_4], dim=3)\n', (2701, 2747), False, 'import torch\n'), ((3301, 3366), 'torch.nn.functional.pad', 'F.pad', (['padded_gemm', '(0, 0, 0, xsz - m.edges_count)', '"""constant"""', '(0)'], {}), "(padded_gemm, (0, 0, 0, xsz - m.edges_count), 'constant', 0)\n", (3306, 3366), True, 'import torch.nn.functional as F\n'), ((3032, 3073), 'torch.tensor', 'torch.tensor', (['m.gemm_edges'], {'device': 'device'}), '(m.gemm_edges, device=device)\n', (3044, 3073), False, 'import torch\n'), ((1001, 1039), 'torch.arange', 'torch.arange', (['(b * ne)'], {'device': 'Gi.device'}), '(b * ne, device=Gi.device)\n', (1013, 1039), False, 'import torch\n')] |
AvinWangZH/3D-convolutional-speaker-recognition | code/0-input/create_hdf5/check_hdf5.py | 61969eb2dba6004bdecb4f7100047015ca665348 | import tables
import numpy as np
import matplotlib.pyplot as plt
# Reading the file.
fileh = tables.open_file('development.hdf5', mode='r')
# Dimensionality of the data structure.
print(fileh.root.utterance_test.shape)
print(fileh.root.utterance_train.shape)
print(fileh.root.label_train.shape)
print(fileh.root.label_test.shape)
| [((94, 140), 'tables.open_file', 'tables.open_file', (['"""development.hdf5"""'], {'mode': '"""r"""'}), "('development.hdf5', mode='r')\n", (110, 140), False, 'import tables\n')] |
LinkTsang/qtask-legacy-python | qtask/utils/testing.py | 9b264b8e33313e4d3615472d59a2a39948eeeaa1 | import asyncio
import traceback
import unittest
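# Decorator for running `async def` test methods of a unittest.TestCase to
# completion on the default asyncio event loop, re-raising (and printing) any
# exception so the test still fails normally.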
def async_test(f):
def wrapper(test_case: unittest.TestCase, *args, **kwargs):
loop = asyncio.get_event_loop()
task = loop.create_task(f(test_case, *args, **kwargs))
try:
loop.run_until_complete(task)
except Exception:
traceback.print_exc()
raise
return wrapper
| [((148, 172), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (170, 172), False, 'import asyncio\n'), ((329, 350), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (348, 350), False, 'import traceback\n')] |
theflatladder/kyrsovaya | landing/views.py | d6d661854cd955e544a199e201f325decc360cc1 | from django.shortcuts import render, render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect
def login(request):
args = {}
args.update(csrf(request))
if request.POST:
username = request.POST.get('username')
password = request.POST.get('password')
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
return redirect('/main')
else:
            args['login_error'] = "User not found or the password entered is incorrect"
return render_to_response('login.html', args)
else:
return render_to_response('login.html', args)
def reg(request):
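    # Registration view: sign out any current user, then create and log in a new
    # account via Django's UserCreationForm.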
auth.logout(request)
error = ''
if request.method == "POST":
newuser_form = UserCreationForm(data = request.POST)
if newuser_form.is_valid():
newuser_form.save()
newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password = newuser_form.cleaned_data['password1'])
auth.login(request, newuser)
return redirect('/main')
else:
            error = 'Please check that the entered data is correct.'
else:
newuser_form = UserCreationForm()
return render(request, 'reg.html', locals() )
def main(request):
return render(request, 'index.html', {'username': auth.get_user(request).username} )
def logout(request):
auth.logout(request)
return HttpResponseRedirect("/login")
| [((863, 883), 'django.contrib.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (874, 883), False, 'from django.contrib import auth\n'), ((1597, 1617), 'django.contrib.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (1608, 1617), False, 'from django.contrib import auth\n'), ((1629, 1659), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""/login"""'], {}), "('/login')\n", (1649, 1659), False, 'from django.http import HttpResponseRedirect\n'), ((301, 314), 'django.template.context_processors.csrf', 'csrf', (['request'], {}), '(request)\n', (305, 314), False, 'from django.template.context_processors import csrf\n'), ((450, 505), 'django.contrib.auth.authenticate', 'auth.authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (467, 505), False, 'from django.contrib import auth\n'), ((800, 838), 'django.shortcuts.render_to_response', 'render_to_response', (['"""login.html"""', 'args'], {}), "('login.html', args)\n", (818, 838), False, 'from django.shortcuts import render, render_to_response, redirect\n'), ((955, 990), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {'data': 'request.POST'}), '(data=request.POST)\n', (971, 990), False, 'from django.contrib.auth.forms import UserCreationForm\n'), ((1389, 1407), 'django.contrib.auth.forms.UserCreationForm', 'UserCreationForm', ([], {}), '()\n', (1405, 1407), False, 'from django.contrib.auth.forms import UserCreationForm\n'), ((547, 572), 'django.contrib.auth.login', 'auth.login', (['request', 'user'], {}), '(request, user)\n', (557, 572), False, 'from django.contrib import auth\n'), ((592, 609), 'django.shortcuts.redirect', 'redirect', (['"""/main"""'], {}), "('/main')\n", (600, 609), False, 'from django.shortcuts import render, render_to_response, redirect\n'), ((736, 774), 'django.shortcuts.render_to_response', 'render_to_response', (['"""login.html"""', 'args'], {}), "('login.html', args)\n", (754, 774), False, 'from django.shortcuts import render, render_to_response, redirect\n'), ((1083, 1202), 'django.contrib.auth.authenticate', 'auth.authenticate', ([], {'username': "newuser_form.cleaned_data['username']", 'password': "newuser_form.cleaned_data['password1']"}), "(username=newuser_form.cleaned_data['username'], password=\n newuser_form.cleaned_data['password1'])\n", (1100, 1202), False, 'from django.contrib import auth\n'), ((1214, 1242), 'django.contrib.auth.login', 'auth.login', (['request', 'newuser'], {}), '(request, newuser)\n', (1224, 1242), False, 'from django.contrib import auth\n'), ((1262, 1279), 'django.shortcuts.redirect', 'redirect', (['"""/main"""'], {}), "('/main')\n", (1270, 1279), False, 'from django.shortcuts import render, render_to_response, redirect\n'), ((1536, 1558), 'django.contrib.auth.get_user', 'auth.get_user', (['request'], {}), '(request)\n', (1549, 1558), False, 'from django.contrib import auth\n')] |
yecfly/DEPRESSIONEST | FPRun11.py | 21b72906aac9f310e264f7a5eea348480a647197 | from Facepatchindependenttrain import runPatch
import sys
if len(sys.argv)==6:
runPatch(GPU_Device_ID=1, FacePatchID=int(sys.argv[1]),
trainpklID=int(sys.argv[2]), testpklID=int(sys.argv[3]),
NetworkType=int(sys.argv[4]),
runs=int(sys.argv[5]))
else:
print("argument errors, try\npython runfile.py <FacePatchID> <trainpklID> <testpklID> <NetworkType> <runs>")
| [] |
jgmize/kitsune | vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2tmx
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import tmx
from translate.storage import lisa
class TestPO2TMX:
def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'):
"""helper that converts po source to tmx source without requiring files"""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)
po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage)
return outputfile.tmxfile
def test_basic(self):
minipo = r"""# Afrikaans translation of program ABC
#
msgid ""
msgstr ""
"Project-Id-Version: program 2.1-branch\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-01-09 07:15+0100\n"
"PO-Revision-Date: 2004-03-30 17:02+0200\n"
"Last-Translator: Zuza Software Foundation <[email protected]>\n"
"Language-Team: Afrikaans <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
# Please remember to do something
#: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4
msgid "Applications"
msgstr "Toepassings"
"""
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("Applications") == "Toepassings"
assert tmx.translate("bla") is None
xmltext = str(tmx)
assert xmltext.index('creationtool="Translate Toolkit - po2tmx"')
assert xmltext.index('adminlang')
assert xmltext.index('creationtoolversion')
assert xmltext.index('datatype')
assert xmltext.index('o-tmf')
assert xmltext.index('segtype')
assert xmltext.index('srclang')
def test_sourcelanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, sourcelanguage="xh")
print "The generated xml:"
print str(tmx)
header = tmx.document.find("header")
assert header.get("srclang") == "xh"
def test_targetlanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, targetlanguage="xh")
print "The generated xml:"
print str(tmx)
tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1]
#tag[0] will be the source, we want the target tuv
assert tuv.get("{%s}lang" % lisa.XML_NS) == "xh"
def test_multiline(self):
"""Test multiline po entry"""
minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('First part and extra') == 'Eerste deel en ekstra'
def test_escapednewlines(self):
"""Test the escaping of newlines"""
minipo = r'''msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn"
def test_escapedtabs(self):
"""Test the escaping of tabs"""
minipo = r'''msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom"
def test_escapedquotes(self):
"""Test the escaping of quotes (and slash)"""
minipo = r'''msgid "Hello \"Everyone\""
msgstr "Good day \"All\""
msgid "Use \\\"."
msgstr "Gebruik \\\"."
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('Hello "Everyone"') == 'Good day "All"'
assert tmx.translate(r'Use \".') == r'Gebruik \".'
def test_exclusions(self):
"""Test that empty and fuzzy messages are excluded"""
minipo = r'''#, fuzzy
msgid "One"
msgstr "Een"
msgid "Two"
msgstr ""
msgid ""
msgstr "Drie"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert "<tu" not in str(tmx)
assert len(tmx.units) == 0
def test_nonascii(self):
"""Tests that non-ascii conversion works."""
minipo = r'''msgid "Bézier curve"
msgstr "Bézier-kurwe"
'''
tmx = self.po2tmx(minipo)
print str(tmx)
assert tmx.translate(u"Bézier curve") == u"Bézier-kurwe"
class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):
"""Tests running actual po2tmx commands on files"""
convertmodule = po2tmx
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-l LANG, --language=LANG")
options = self.help_check(options, "--source-language=LANG", last=True)
| [] |
mesabib/kodi.yatp | plugin.video.yatp/libs/client/commands.py | d874df43047b5b58f84cb3760fc891d9a133a69f | # coding: utf-8
# Module: commands
# Created on: 28.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. ([email protected])
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
"""
Context menu commands
"""
import sys
import xbmc
import xbmcgui
import json_requests as jsonrq
from simpleplugin import Addon
addon = Addon('plugin.video.yatp')
_ = addon.initialize_gettext()
def show_torrent_info(info_hash):
"""
Display current torrent info
:param info_hash:
:return:
"""
torr_info = jsonrq.get_torrent_info(info_hash)
info_dialog = xbmcgui.DialogProgress()
info_dialog.create(torr_info['name'])
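    # poll the torrent status once per second until the user cancels the dialog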
while not info_dialog.iscanceled():
info_dialog.update(torr_info['progress'],
_('state: {0}; seeds: {1}; peers: {2}').format(
torr_info['state'],
torr_info['num_seeds'],
torr_info['num_peers']
),
_('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format(
torr_info['size'],
torr_info['dl_speed'],
torr_info['ul_speed']
),
_('total DL: {0}MB; total UL: {1}MB').format(
torr_info['total_download'],
torr_info['total_upload'])
)
xbmc.sleep(1000)
torr_info = jsonrq.get_torrent_info(info_hash)
if __name__ == '__main__':
if sys.argv[1] == 'pause':
jsonrq.pause_torrent(sys.argv[2])
elif sys.argv[1] == 'resume':
jsonrq.resume_torrent(sys.argv[2])
elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent?')):
jsonrq.remove_torrent(sys.argv[2], False)
    elif sys.argv[1] == 'delete_with_files' and xbmcgui.Dialog().yesno(
_('Confirm delete'),
_('Do you really want to delete the torrent with files?'),
_('Warning: The files will be deleted permanently!')):
jsonrq.remove_torrent(sys.argv[2], True)
elif sys.argv[1] == 'pause_all':
jsonrq.pause_all()
elif sys.argv[1] == 'resume_all':
jsonrq.resume_all()
elif sys.argv[1] == 'show_info':
show_torrent_info(sys.argv[2])
elif sys.argv[1] == 'restore_finished':
jsonrq.restore_finished(sys.argv[2])
else:
addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1]))
xbmc.executebuiltin('Container.Refresh')
| [((323, 349), 'simpleplugin.Addon', 'Addon', (['"""plugin.video.yatp"""'], {}), "('plugin.video.yatp')\n", (328, 349), False, 'from simpleplugin import Addon\n'), ((518, 552), 'json_requests.get_torrent_info', 'jsonrq.get_torrent_info', (['info_hash'], {}), '(info_hash)\n', (541, 552), True, 'import json_requests as jsonrq\n'), ((571, 595), 'xbmcgui.DialogProgress', 'xbmcgui.DialogProgress', ([], {}), '()\n', (593, 595), False, 'import xbmcgui\n'), ((2643, 2683), 'xbmc.executebuiltin', 'xbmc.executebuiltin', (['"""Container.Refresh"""'], {}), "('Container.Refresh')\n", (2662, 2683), False, 'import xbmc\n'), ((1498, 1514), 'xbmc.sleep', 'xbmc.sleep', (['(1000)'], {}), '(1000)\n', (1508, 1514), False, 'import xbmc\n'), ((1535, 1569), 'json_requests.get_torrent_info', 'jsonrq.get_torrent_info', (['info_hash'], {}), '(info_hash)\n', (1558, 1569), True, 'import json_requests as jsonrq\n'), ((1638, 1671), 'json_requests.pause_torrent', 'jsonrq.pause_torrent', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (1658, 1671), True, 'import json_requests as jsonrq\n'), ((1714, 1748), 'json_requests.resume_torrent', 'jsonrq.resume_torrent', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (1735, 1748), True, 'import json_requests as jsonrq\n'), ((1912, 1953), 'json_requests.remove_torrent', 'jsonrq.remove_torrent', (['sys.argv[2]', '(False)'], {}), '(sys.argv[2], False)\n', (1933, 1953), True, 'import json_requests as jsonrq\n'), ((2204, 2244), 'json_requests.remove_torrent', 'jsonrq.remove_torrent', (['sys.argv[2]', '(True)'], {}), '(sys.argv[2], True)\n', (2225, 2244), True, 'import json_requests as jsonrq\n'), ((1786, 1802), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (1800, 1802), False, 'import xbmcgui\n'), ((2290, 2308), 'json_requests.pause_all', 'jsonrq.pause_all', ([], {}), '()\n', (2306, 2308), True, 'import json_requests as jsonrq\n'), ((2001, 2017), 'xbmcgui.Dialog', 'xbmcgui.Dialog', ([], {}), '()\n', (2015, 2017), False, 'import xbmcgui\n'), ((2355, 2374), 'json_requests.resume_all', 'jsonrq.resume_all', ([], {}), '()\n', (2372, 2374), True, 'import json_requests as jsonrq\n'), ((2503, 2539), 'json_requests.restore_finished', 'jsonrq.restore_finished', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (2526, 2539), True, 'import json_requests as jsonrq\n')] |
GeorgeDittmar/MarkovTextGenerator | setup.py | df6a56e23051e1f263ba22889dc3b5d0dc03e370 | #!/usr/bin/env python
from distutils.core import setup
setup(name='Mimik',
version='1.0',
description='Python framework for markov models',
author='George Dittmar',
author_email='[email protected]',
url='https://www.python.org/sigs/distutils-sig/',
packages=['distutils', 'distutils.command'],
)
| [((57, 319), 'distutils.core.setup', 'setup', ([], {'name': '"""Mimik"""', 'version': '"""1.0"""', 'description': '"""Python framework for markov models"""', 'author': '"""George Dittmar"""', 'author_email': '"""[email protected]"""', 'url': '"""https://www.python.org/sigs/distutils-sig/"""', 'packages': "['distutils', 'distutils.command']"}), "(name='Mimik', version='1.0', description=\n 'Python framework for markov models', author='George Dittmar',\n author_email='[email protected]', url=\n 'https://www.python.org/sigs/distutils-sig/', packages=['distutils',\n 'distutils.command'])\n", (62, 319), False, 'from distutils.core import setup\n')] |
deplatformr/open-images | pipeline/scripts/package.py | 3726c9802bda1d7ecbbbd9920d5566daaecc9faa | import os
import shutil
import sqlite3
import tarfile
from datetime import datetime
import bagit
def create_package(images, batch_dir):
    package_threshold = 838860800  # 800 MiB; rounding up to the next power of two gives 1 GiB
print("Package threshold: " + get_human_readable_file_size(package_threshold))
abs_path = os.getcwd()
try:
package_size = 0
for image in images:
package_size += image[1]
print("Total batch size: " + get_human_readable_file_size(package_size))
if package_size < package_threshold:
print("Not enough images yet to make a package from this batch.")
return()
else:
try:
# create new batch directory
split = os.path.split(batch_dir)
new_dir_number = int(split[1]) + 1
new_batch_dir = os.path.join(split[0], str(new_dir_number))
os.makedirs(new_batch_dir)
# move all related files for the last image that's getting removed from batch to keep within threshold
last_image = images[-1]
path, dirs, files = next(os.walk(batch_dir))
for file in files:
if file.find(last_image[0]) != -1:
filepath = os.path.join(path, file)
shutil.move(filepath, os.path.join(
new_batch_dir, file))
# drop the last image from the list (convert tuple) to get the package size back under threshold
images.pop(-1)
except Exception as e:
print("Unable to separate batch to make a package.")
print(e)
return()
# Convert batch directory into a Bagit directory
external_identifier = "deplatformr-open-images-" + split[1]
bagit.make_bag(batch_dir,
{'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=["sha512"])
print("Created a Bagit directory.")
try:
# Create the tar package
packages_dir = os.path.join(
os.getcwd(), "source_data/packages/")
tarball_name = external_identifier + ".tar"
tarball = tarfile.open(os.path.join(
packages_dir, tarball_name), "w")
tarball.add(batch_dir, arcname=external_identifier)
tarball.close()
print("Created tarball " + tarball_name + ".")
except Exception as e:
print("Unable to create a tarball package from batch.")
print(e)
return()
try:
shutil.rmtree(batch_dir)
print("Deleted the batch source directory.")
except OSError as e:
print("Unable to delete the source directory.")
print(e)
# record the tarball package name for each image
db_path = os.path.join(
abs_path, "source_data/deplatformr_open_images_v6.sqlite")
images_db = sqlite3.connect(db_path)
cursor = images_db.cursor()
for image in images:
cursor.execute("UPDATE open_images SET package_name = ? WHERE ImageID = ?",
(tarball_name, image[0],),)
images_db.commit()
images_db.close()
# add tarball name, size, and timestamp to the workflow dbase
utctime = datetime.utcnow()
tarball_size = os.path.getsize(
os.path.join(packages_dir, tarball_name))
print("Tarball size is: " + get_human_readable_file_size(tarball_size))
db_path = os.path.join(
abs_path, "deplatformr_open_images_workflow.sqlite")
workflow_db = sqlite3.connect(db_path)
cursor = workflow_db.cursor()
for image in images:
print("Linking image " +
image[0] + " to " + tarball_name + " in SQLite.")
cursor.execute(
"UPDATE images SET package_name = ? WHERE image_id = ?", (tarball_name, image[0],),)
cursor.execute("INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)",
(tarball_name, tarball_size, utctime,),)
workflow_db.commit()
workflow_db.close()
except Exception as e:
print("Unable to create a package for batch directory " + batch_dir)
print(e)
def get_human_readable_file_size(size, precision=2):
suffixes = ["B", "KiB", "MiB", "GiB", "TiB"]
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size / 1024.0 # apply the division
return "%.*f %s" % (precision, size, suffixes[suffixIndex])
| [((313, 324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (322, 324), False, 'import os\n'), ((1870, 2625), 'bagit.make_bag', 'bagit.make_bag', (['batch_dir', "{'Source-Organization': 'Deplatformr Project', 'Organization-Address':\n 'https://open-images.deplatformr.com', 'External-Description':\n 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.'\n , 'External-Identifier': external_identifier, 'License':\n 'https://creativecommons.org/licenses/by/2.0/'}"], {'checksums': "['sha512']"}), "(batch_dir, {'Source-Organization': 'Deplatformr Project',\n 'Organization-Address': 'https://open-images.deplatformr.com',\n 'External-Description':\n 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.'\n , 'External-Identifier': external_identifier, 'License':\n 'https://creativecommons.org/licenses/by/2.0/'}, checksums=['sha512'])\n", (1884, 2625), False, 'import bagit\n'), ((3655, 3726), 'os.path.join', 'os.path.join', (['abs_path', '"""source_data/deplatformr_open_images_v6.sqlite"""'], {}), "(abs_path, 'source_data/deplatformr_open_images_v6.sqlite')\n", (3667, 3726), False, 'import os\n'), ((3768, 3792), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (3783, 3792), False, 'import sqlite3\n'), ((4175, 4192), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4190, 4192), False, 'from datetime import datetime\n'), ((4401, 4466), 'os.path.join', 'os.path.join', (['abs_path', '"""deplatformr_open_images_workflow.sqlite"""'], {}), "(abs_path, 'deplatformr_open_images_workflow.sqlite')\n", (4413, 4466), False, 'import os\n'), ((4510, 4534), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (4525, 4534), False, 'import sqlite3\n'), ((751, 775), 'os.path.split', 'os.path.split', (['batch_dir'], {}), '(batch_dir)\n', (764, 775), False, 'import os\n'), ((919, 945), 'os.makedirs', 'os.makedirs', (['new_batch_dir'], {}), '(new_batch_dir)\n', (930, 945), False, 'import os\n'), ((3363, 3387), 'shutil.rmtree', 'shutil.rmtree', (['batch_dir'], {}), '(batch_dir)\n', (3376, 3387), False, 'import shutil\n'), ((4253, 4293), 'os.path.join', 'os.path.join', (['packages_dir', 'tarball_name'], {}), '(packages_dir, tarball_name)\n', (4265, 4293), False, 'import os\n'), ((1146, 1164), 'os.walk', 'os.walk', (['batch_dir'], {}), '(batch_dir)\n', (1153, 1164), False, 'import os\n'), ((2804, 2815), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2813, 2815), False, 'import os\n'), ((2941, 2981), 'os.path.join', 'os.path.join', (['packages_dir', 'tarball_name'], {}), '(packages_dir, tarball_name)\n', (2953, 2981), False, 'import os\n'), ((1291, 1315), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1303, 1315), False, 'import os\n'), ((1362, 1395), 'os.path.join', 
'os.path.join', (['new_batch_dir', 'file'], {}), '(new_batch_dir, file)\n', (1374, 1395), False, 'import os\n')] |
don4apaev/anfisa | app/search/hot_eval/hl_reportable.py | 2e4bdd83c584c0000f037413ccc1f9067c07fa70 | def evalRec(env, rec):
"""hl_reportable"""
return (len(set(rec.Genes) &
{
'ABHD12',
'ACTG1',
'ADGRV1',
'AIFM1',
'ATP6V1B1',
'BCS1L',
'BSND',
'CABP2',
'CACNA1D',
'CDC14A',
'CDH23',
'CEACAM16',
'CEP78',
'CHD7',
'CIB2',
'CISD2',
'CLDN14',
'CLIC5',
'CLPP',
'CLRN1',
'COCH',
'COL11A2',
'DIAPH1',
'DIAPH3',
'DMXL2',
'DNMT1',
'DSPP',
'EDN3',
'EDNRB',
'EPS8',
'EPS8L2',
'ESPN',
'ESRRB',
'EYA1',
'EYA4',
'GIPC3',
'GJB2',
'GJB6',
'GPSM2',
'GRHL2',
'GRXCR1',
'GSDME',
'HGF',
'HSD17B4',
'ILDR1',
'KCNE1',
'KCNQ1',
'KCNQ4',
'LARS2',
'LHFPL5',
'LOXHD1',
'LRTOMT',
'MARVELD2',
'MIR96',
'MITF',
'MSRB3',
'MT-RNR1',
'MT-TS1',
'MYH14',
'MYH9',
'MYO15A',
'MYO3A',
'MYO6',
'MYO7A',
'OSBPL2',
'OTOA',
'OTOF',
'OTOG',
'OTOGL',
'P2RX2',
'PAX3',
'PDZD7',
'PJVK',
'POU3F4',
'POU4F3',
'PRPS1',
'PTPRQ',
'RDX',
'RIPOR2',
'S1PR2',
'SERPINB6',
'SIX1',
'SLC17A8',
'SLC26A4',
'SLC52A2',
'SLITRK6',
'SMPX',
'SOX10',
'STRC',
'SYNE4',
'TBC1D24',
'TECTA',
'TIMM8A',
'TMC1',
'TMIE',
'TMPRSS3',
'TPRN',
'TRIOBP',
'TUBB4B',
'USH1C',
'USH1G',
'USH2A',
'WFS1',
'WHRN',
}
) > 0) | [] |
fau-is/grm | eval/util/metrics.py | 78b1559ea0dda1b817283adecd58da50ca232223 | import sklearn
import pandas
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np
def metrics_from_prediction_and_label(labels, predictions, verbose=False):
measures = {
"accuracy": sklearn.metrics.accuracy_score(labels, predictions),
"balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions),
"precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'),
"precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'),
"precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'),
"recall_micro": sklearn.metrics.recall_score(labels, predictions, average='micro'),
"recall_macro": sklearn.metrics.recall_score(labels, predictions, average='macro'),
"recall_weighted": sklearn.metrics.recall_score(labels, predictions, average='weighted'),
"f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'),
"f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'),
"f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted')
}
try:
measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted')
measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro')
measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Roc auc score can not be calculated ...")
try:
# note we use the average precision at different threshold values as the auc of the pr-curve
# and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic
measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted')
measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro')
measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Auc prc score can not be calculated ...")
save_confusion_matrix(labels, predictions)
report = save_classification_report(labels, predictions)
classes = list(sorted(set(labels)))
for pos_class in classes:
measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision']
measures[str(pos_class) + "_recall"] = report[str(pos_class)]['recall']
measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score']
measures[str(pos_class) + "_support"] = report[str(pos_class)]['support']
if pos_class == 1:
neg_class = 0
else:
neg_class = 1
tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)
measures[str(pos_class) + "_tp"] = tp
measures[str(pos_class) + "_fp"] = fp
measures[str(pos_class) + "_tn"] = tn
measures[str(pos_class) + "_fn"] = fn
if tn + fp == 0:
pass
else:
# Specificity or true negative rate
measures[str(pos_class) + "_tnr"] = tn / (tn + fp)
# Fall out or false positive rate
measures[str(pos_class) + "_fpr"] = fp / (fp + tn)
if tn + fn == 0:
pass
else:
# Negative predictive value
measures[str(pos_class) + "_npv"] = tn / (tn + fn)
if tp + fn == 0:
pass
else:
# False negative rate
measures[str(pos_class) + "_fnr"] = fn / (tp + fn)
if tp + fp == 0:
pass
else:
# False discovery rate
measures[str(pos_class) + "_fdr"] = fp / (tp + fp)
return measures
def calculate_cm_states(labels, predictions, pos_class, neg_class):
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(predictions)):
if labels[i] == predictions[i] == pos_class:
tp += 1
if predictions[i] == pos_class and labels[i] != predictions[i]:
fp += 1
if labels[i] == predictions[i] == neg_class:
tn += 1
if predictions[i] == neg_class and labels[i] != predictions[i]:
fn += 1
return tp, fp, tn, fn
def save_classification_report(labels, predictions):
return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True)
def multi_class_roc_auc_score(label, predict, average):
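    # binarize (one-hot encode) both label vectors so the multi-class ROC AUC
    # can be computed with the requested averaging strategy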
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.roc_auc_score(label, predict, average=average)
def multi_class_prc_auc_score(label, predict, average):
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.average_precision_score(label, predict, average=average)
def label_binarizer(labels):
for index in range(0, len(labels)):
if labels[index] >= 0.5:
labels[index] = 1.0
else:
labels[index] = 0.0
return labels
def save_confusion_matrix(labels, predictions, path="../../../results/cm.pdf"):
classes = sklearn.utils.multiclass.unique_labels(labels, predictions)
cms = []
cm = sklearn.metrics.confusion_matrix(labels, predictions)
cm_df = pandas.DataFrame(cm, index=classes, columns=classes)
cms.append(cm_df)
def prettify(n):
"""
if n > 1000000:
return str(np.round(n / 1000000, 1)) + 'M'
elif n > 1000:
return str(np.round(n / 1000, 1)) + 'K'
else:
return str(n)
"""
return str(n)
cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)
annot = cm.applymap(prettify)
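    # row-normalize the matrix so each cell shows the fraction of its actual
    # class, while the annotations keep the raw counts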
cm = (cm.T / cm.sum(axis=1)).T
fig, g = pyplot.subplots(figsize=(7, 4.5))
g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1)
_ = g.set(ylabel='Actual', xlabel='Prediction')
for _, spine in g.spines.items():
spine.set_visible(True)
pyplot.xticks(rotation=45)
fig.tight_layout()
fig.savefig(path)
pyplot.close() | [((4466, 4560), 'sklearn.metrics.classification_report', 'sklearn.metrics.classification_report', ([], {'y_true': 'labels', 'y_pred': 'predictions', 'output_dict': '(True)'}), '(y_true=labels, y_pred=predictions,\n output_dict=True)\n', (4503, 4560), False, 'import sklearn\n'), ((4637, 4675), 'sklearn.preprocessing.LabelBinarizer', 'sklearn.preprocessing.LabelBinarizer', ([], {}), '()\n', (4673, 4675), False, 'import sklearn\n'), ((4813, 4875), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['label', 'predict'], {'average': 'average'}), '(label, predict, average=average)\n', (4842, 4875), False, 'import sklearn\n'), ((4956, 4994), 'sklearn.preprocessing.LabelBinarizer', 'sklearn.preprocessing.LabelBinarizer', ([], {}), '()\n', (4992, 4994), False, 'import sklearn\n'), ((5132, 5204), 'sklearn.metrics.average_precision_score', 'sklearn.metrics.average_precision_score', (['label', 'predict'], {'average': 'average'}), '(label, predict, average=average)\n', (5171, 5204), False, 'import sklearn\n'), ((5504, 5563), 'sklearn.utils.multiclass.unique_labels', 'sklearn.utils.multiclass.unique_labels', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5542, 5563), False, 'import sklearn\n'), ((5586, 5639), 'sklearn.metrics.confusion_matrix', 'sklearn.metrics.confusion_matrix', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5618, 5639), False, 'import sklearn\n'), ((5652, 5704), 'pandas.DataFrame', 'pandas.DataFrame', (['cm'], {'index': 'classes', 'columns': 'classes'}), '(cm, index=classes, columns=classes)\n', (5668, 5704), False, 'import pandas\n'), ((6130, 6163), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'figsize': '(7, 4.5)'}), '(figsize=(7, 4.5))\n', (6145, 6163), True, 'import matplotlib.pyplot as pyplot\n'), ((6172, 6272), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': 'annot', 'fmt': '""""""', 'cmap': '"""Blues"""', 'cbar': '(False)', 'rasterized': '(True)', 'linewidths': '(0.1)'}), "(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=\n True, linewidths=0.1)\n", (6183, 6272), True, 'import seaborn as sns\n'), ((6396, 6422), 'matplotlib.pyplot.xticks', 'pyplot.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (6409, 6422), True, 'import matplotlib.pyplot as pyplot\n'), ((6472, 6486), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (6484, 6486), True, 'import matplotlib.pyplot as pyplot\n'), ((251, 302), 'sklearn.metrics.accuracy_score', 'sklearn.metrics.accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (281, 302), False, 'import sklearn\n'), ((333, 393), 'sklearn.metrics.balanced_accuracy_score', 'sklearn.metrics.balanced_accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (372, 393), False, 'import sklearn\n'), ((422, 491), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (453, 491), False, 'import sklearn\n'), ((520, 589), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (551, 589), False, 'import sklearn\n'), ((621, 693), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (652, 693), False, 'import sklearn\n'), ((719, 785), 'sklearn.metrics.recall_score', 
'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (747, 785), False, 'import sklearn\n'), ((811, 877), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (839, 877), False, 'import sklearn\n'), ((906, 975), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (934, 975), False, 'import sklearn\n'), ((1003, 1065), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (1027, 1065), False, 'import sklearn\n'), ((1093, 1155), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (1117, 1155), False, 'import sklearn\n'), ((1186, 1251), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (1210, 1251), False, 'import sklearn\n')] |
gabrielpiassetta/django-pgschemas | dpgs_sandbox/tests/test_bug_migrations_in_base_models.py | 1e76db4cef31c7534bf4ba109961e835a1dd3c96 | import warnings
from unittest.mock import patch
from django.apps import apps
from django.core import management
from django.core.management.base import CommandError
from django.db import models
from django.db.utils import ProgrammingError
from django.test import TransactionTestCase, tag
from django_pgschemas.checks import check_schema_names
from django_pgschemas.models import TenantMixin
from django_pgschemas.utils import get_tenant_model
TenantModel = get_tenant_model()
def patched_get_tenant_model(*args, **kwargs):
class TenantModel(TenantMixin):
dummy = models.TextField()
class Meta:
app_label = get_tenant_model()._meta.app_label
return TenantModel
@tag("bug")
class MigrationZeroRoundTripTestCase(TransactionTestCase):
"""
Provoke a handled ProgrammingError by migrating models from empty database.
"""
def test_database_checks_with_zero_migrations(self):
management.call_command("migrate", "shared_public", "zero", verbosity=0)
# The goal is that the next line doesn't raise ProgrammingError
check_schema_names(apps.get_app_config("django_pgschemas"))
management.call_command("migrate", verbosity=0)
@tag("bug")
class UnappliedMigrationTestCase(TransactionTestCase):
"""
Provoke a handled ProgrammingError by running tenant command with pending model changes.
"""
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
@patch("django_pgschemas.management.commands.get_tenant_model", patched_get_tenant_model)
def test_whowill_with_pending_migrations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Avoid warnings about model being registered twice
with self.assertRaises(CommandError) as ctx:
management.call_command("whowill", all_schemas=True, verbosity=0)
self.assertEqual(
str(ctx.exception),
"Error while attempting to retrieve dynamic schemas. "
"Perhaps you need to migrate the 'public' schema first?",
)
@tag("bug")
class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase):
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
def test_migrate_with_exclusions(self):
# We first unapply a migration with fake so we can reapply it without fake
# This should work without errors
management.call_command("migrate", "app_tenants", "0001_initial", fake=True, schemas=["tenant1"], verbosity=0)
# We then migrate on all schemas except for tenant1, THIS IS THE CASE WE WANT TO TEST
# This should work without errors
management.call_command("migrate", all_schemas=True, excluded_schemas=["tenant1"], verbosity=0)
# If we try to global migrate now, we should get a ProgrammingError
with self.assertRaises(ProgrammingError):
management.call_command("migrate", all_schemas=True, verbosity=0)
# We finally apply the migration again with fake
# This should work without errors
management.call_command("migrate", fake=True, all_schemas=True, verbosity=0)
| [((460, 478), 'django_pgschemas.utils.get_tenant_model', 'get_tenant_model', ([], {}), '()\n', (476, 478), False, 'from django_pgschemas.utils import get_tenant_model\n'), ((706, 716), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (709, 716), False, 'from django.test import TransactionTestCase, tag\n'), ((1210, 1220), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (1213, 1220), False, 'from django.test import TransactionTestCase, tag\n'), ((2304, 2314), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (2307, 2314), False, 'from django.test import TransactionTestCase, tag\n'), ((1659, 1751), 'unittest.mock.patch', 'patch', (['"""django_pgschemas.management.commands.get_tenant_model"""', 'patched_get_tenant_model'], {}), "('django_pgschemas.management.commands.get_tenant_model',\n patched_get_tenant_model)\n", (1664, 1751), False, 'from unittest.mock import patch\n'), ((580, 598), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (596, 598), False, 'from django.db import models\n'), ((938, 1010), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""', '"""shared_public"""', '"""zero"""'], {'verbosity': '(0)'}), "('migrate', 'shared_public', 'zero', verbosity=0)\n", (961, 1010), False, 'from django.core import management\n'), ((1159, 1206), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'verbosity': '(0)'}), "('migrate', verbosity=0)\n", (1182, 1206), False, 'from django.core import management\n'), ((2827, 2941), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""', '"""app_tenants"""', '"""0001_initial"""'], {'fake': '(True)', 'schemas': "['tenant1']", 'verbosity': '(0)'}), "('migrate', 'app_tenants', '0001_initial', fake=True,\n schemas=['tenant1'], verbosity=0)\n", (2850, 2941), False, 'from django.core import management\n'), ((3082, 3182), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'all_schemas': '(True)', 'excluded_schemas': "['tenant1']", 'verbosity': '(0)'}), "('migrate', all_schemas=True, excluded_schemas=[\n 'tenant1'], verbosity=0)\n", (3105, 3182), False, 'from django.core import management\n'), ((3489, 3565), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'fake': '(True)', 'all_schemas': '(True)', 'verbosity': '(0)'}), "('migrate', fake=True, all_schemas=True, verbosity=0)\n", (3512, 3565), False, 'from django.core import management\n'), ((1110, 1149), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['"""django_pgschemas"""'], {}), "('django_pgschemas')\n", (1129, 1149), False, 'from django.apps import apps\n'), ((1813, 1838), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1836, 1838), False, 'import warnings\n'), ((1852, 1883), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1873, 1883), False, 'import warnings\n'), ((3316, 3381), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'all_schemas': '(True)', 'verbosity': '(0)'}), "('migrate', all_schemas=True, verbosity=0)\n", (3339, 3381), False, 'from django.core import management\n'), ((2010, 2075), 'django.core.management.call_command', 'management.call_command', (['"""whowill"""'], {'all_schemas': '(True)', 'verbosity': '(0)'}), "('whowill', all_schemas=True, verbosity=0)\n", (2033, 2075), False, 'from django.core import management\n'), ((644, 662), 
'django_pgschemas.utils.get_tenant_model', 'get_tenant_model', ([], {}), '()\n', (660, 662), False, 'from django_pgschemas.utils import get_tenant_model\n')] |
pingsutw/tfx | tfx/components/transform/component.py | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Transform component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text, Union
import absl
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.transform import executor
from tfx.orchestration import data_types
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import TransformSpec
class Transform(base_component.BaseComponent):
"""A TFX component to transform the input examples.
The Transform component wraps TensorFlow Transform (tf.Transform) to
preprocess data in a TFX pipeline. This component will load the
preprocessing_fn from input module file, preprocess both 'train' and 'eval'
splits of input examples, generate the `tf.Transform` output, and save both
transform function and transformed examples to orchestrator desired locations.
## Providing a preprocessing function
  The TFX executor will load the user-provided preprocessing logic from the
  `module_file`. The Transform executor will look specifically for the
  `preprocessing_fn()` function within that file.
An example of `preprocessing_fn()` can be found in the [user-supplied
  code](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)
of the TFX Chicago Taxi pipeline example.
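  A minimal sketch of such a function (the feature names and the specific
  `tensorflow_transform` helpers below are illustrative, not taken from this
  module) could look like:

  ```
  import tensorflow_transform as tft

  def preprocessing_fn(inputs):
    # Scale a numeric feature to z-scores and integerize a string feature.
    return {
        'x_scaled': tft.scale_to_z_score(inputs['x']),
        's_integerized': tft.compute_and_apply_vocabulary(inputs['s']),
    }
  ```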
## Example
```
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=module_file)
```
Please see https://www.tensorflow.org/tfx/transform for more details.
"""
SPEC_CLASS = TransformSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
def __init__(
self,
examples: types.Channel = None,
schema: types.Channel = None,
module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None,
preprocessing_fn: Optional[Union[Text,
data_types.RuntimeParameter]] = None,
transform_graph: Optional[types.Channel] = None,
transformed_examples: Optional[types.Channel] = None,
input_data: Optional[types.Channel] = None,
instance_name: Optional[Text] = None,
enable_cache: Optional[bool] = None):
"""Construct a Transform component.
Args:
examples: A Channel of type `standard_artifacts.Examples` (required).
This should contain the two splits 'train' and 'eval'.
schema: A Channel of type `standard_artifacts.Schema`. This should
contain a single schema artifact.
module_file: The file path to a python module file, from which the
'preprocessing_fn' function will be loaded. The function must have the
following signature.
def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]:
...
where the values of input and returned Dict are either tf.Tensor or
tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn'
must be supplied.
preprocessing_fn: The path to python function that implements a
'preprocessing_fn'. See 'module_file' for expected signature of the
function. Exactly one of 'module_file' or 'preprocessing_fn' must be
supplied.
transform_graph: Optional output 'TransformPath' channel for output of
'tf.Transform', which includes an exported Tensorflow graph suitable for
both training and serving;
transformed_examples: Optional output 'ExamplesPath' channel for
materialized transformed examples, which includes both 'train' and
'eval' splits.
input_data: Backwards compatibility alias for the 'examples' argument.
instance_name: Optional unique instance name. Necessary iff multiple
transform components are declared in the same pipeline.
enable_cache: Optional boolean to indicate if cache is enabled for the
Transform component. If not specified, defaults to the value
specified for pipeline's enable_cache parameter.
Raises:
ValueError: When both or neither of 'module_file' and 'preprocessing_fn'
is supplied.
"""
if input_data:
absl.logging.warning(
'The "input_data" argument to the Transform component has '
'been renamed to "examples" and is deprecated. Please update your '
'usage as support for this argument will be removed soon.')
examples = input_data
if bool(module_file) == bool(preprocessing_fn):
raise ValueError(
"Exactly one of 'module_file' or 'preprocessing_fn' must be supplied."
)
transform_graph = transform_graph or types.Channel(
type=standard_artifacts.TransformGraph,
artifacts=[standard_artifacts.TransformGraph()])
if not transformed_examples:
example_artifact = standard_artifacts.Examples()
example_artifact.split_names = artifact_utils.encode_split_names(
artifact.DEFAULT_EXAMPLE_SPLITS)
transformed_examples = types.Channel(
type=standard_artifacts.Examples, artifacts=[example_artifact])
spec = TransformSpec(
examples=examples,
schema=schema,
module_file=module_file,
preprocessing_fn=preprocessing_fn,
transform_graph=transform_graph,
transformed_examples=transformed_examples)
super(Transform, self).__init__(
spec=spec, instance_name=instance_name, enable_cache=enable_cache)
| [((2529, 2579), 'tfx.components.base.executor_spec.ExecutorClassSpec', 'executor_spec.ExecutorClassSpec', (['executor.Executor'], {}), '(executor.Executor)\n', (2560, 2579), False, 'from tfx.components.base import executor_spec\n'), ((5985, 6176), 'tfx.types.standard_component_specs.TransformSpec', 'TransformSpec', ([], {'examples': 'examples', 'schema': 'schema', 'module_file': 'module_file', 'preprocessing_fn': 'preprocessing_fn', 'transform_graph': 'transform_graph', 'transformed_examples': 'transformed_examples'}), '(examples=examples, schema=schema, module_file=module_file,\n preprocessing_fn=preprocessing_fn, transform_graph=transform_graph,\n transformed_examples=transformed_examples)\n', (5998, 6176), False, 'from tfx.types.standard_component_specs import TransformSpec\n'), ((5058, 5270), 'absl.logging.warning', 'absl.logging.warning', (['"""The "input_data" argument to the Transform component has been renamed to "examples" and is deprecated. Please update your usage as support for this argument will be removed soon."""'], {}), '(\n \'The "input_data" argument to the Transform component has been renamed to "examples" and is deprecated. Please update your usage as support for this argument will be removed soon.\'\n )\n', (5078, 5270), False, 'import absl\n'), ((5711, 5740), 'tfx.types.standard_artifacts.Examples', 'standard_artifacts.Examples', ([], {}), '()\n', (5738, 5740), False, 'from tfx.types import standard_artifacts\n'), ((5778, 5844), 'tfx.types.artifact_utils.encode_split_names', 'artifact_utils.encode_split_names', (['artifact.DEFAULT_EXAMPLE_SPLITS'], {}), '(artifact.DEFAULT_EXAMPLE_SPLITS)\n', (5811, 5844), False, 'from tfx.types import artifact_utils\n'), ((5885, 5962), 'tfx.types.Channel', 'types.Channel', ([], {'type': 'standard_artifacts.Examples', 'artifacts': '[example_artifact]'}), '(type=standard_artifacts.Examples, artifacts=[example_artifact])\n', (5898, 5962), False, 'from tfx import types\n'), ((5615, 5650), 'tfx.types.standard_artifacts.TransformGraph', 'standard_artifacts.TransformGraph', ([], {}), '()\n', (5648, 5650), False, 'from tfx.types import standard_artifacts\n')] |
anderslatif/alg | objects/GitIndexEntry.py | d5902a05a4cb249e554f65a7e8016d7d050b6da9 | # https://github.com/git/git/blob/master/Documentation/technical/index-format.txt
class GitIndexEntry(object):
# The last time a file's metadata changed. This is a tuple (seconds, nanoseconds)
ctime = None
# The last time a file's data changed. This is a tuple (seconds, nanoseconds)
mtime = None
# the ID of device containing this file
dev = None
# The file's inode number
ino = None
# The object type, either b1000 (regular), b1010 (symlink), b1110 (gitlink)
mode_type = None
# The object permissions as an integer
mode_permissions = None
# User ID of owner
    uid = None
# Group ID of owner
gid = None
# Size of this object in bytes
size = None
# The object's hash as a hex string
object = None
flag_assume_valid = None
flag_extended = None
flag_stage = None
    # Length of the name if < 0xFFF, -1 otherwise
flag_name_length = None
name = None
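    # A minimal sketch (hypothetical usage, not part of this module) of how an
    # entry could be filled in from os.stat():
    #   st = os.stat(path)
    #   entry = GitIndexEntry()
    #   entry.ctime = (int(st.st_ctime), st.st_ctime_ns % 10**9)
    #   entry.mtime = (int(st.st_mtime), st.st_mtime_ns % 10**9)
    #   entry.dev, entry.ino = st.st_dev, st.st_ino
    #   entry.uid, entry.gid, entry.size = st.st_uid, st.st_gid, st.st_size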
| [] |
huzongxiang/CrystalNetwork | matdgl/layers/partitionpaddinglayer.py | a434f76fa4347d42b3c905852ce265cd0bcefca3 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 14:47:13 2021
@author: huzongxiang
"""
import tensorflow as tf
from tensorflow.keras import layers
class PartitionPadding(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
features, graph_indices = inputs
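        # features: (total_items, feature_dim); graph_indices: (total_items,)
        # maps every row of `features` to the index of its graph in the batch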
# Obtain subgraphs
features = tf.dynamic_partition(
features, graph_indices, self.batch_size
)
# Pad and stack subgraphs
num_features = [tf.shape(f)[0] for f in features]
max_num = tf.reduce_max(num_features)
features_padded = tf.stack(
[
tf.pad(f, [(0, max_num - n), (0, 0)])
for f, n in zip(features, num_features)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch)
nonempty_examples = tf.where(tf.reduce_sum(features_padded, (1, 2)) != 0)
nonempty_examples = tf.squeeze(nonempty_examples, axis=-1)
features_batch = tf.gather(features_padded, nonempty_examples, axis=0)
return features_batch
def get_config(self):
config = super().get_config()
config.update({"batch": self.batch_size})
return config
class PartitionPaddingPair(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
features, graph_indices = inputs
# Obtain subgraphs
features = tf.dynamic_partition(
features, graph_indices, self.batch_size
)
# Pad and stack subgraphs
num_features = [tf.shape(f)[0] for f in features]
max_num = tf.reduce_max(num_features)
features_padded = tf.stack(
[
tf.pad(f, [(0, max_num - n), (0, 0)])
for f, n in zip(features, num_features)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch)
nonempty_examples = tf.unique(graph_indices)[0]
features_batch = tf.gather(features_padded, nonempty_examples, axis=0)
return features_batch
def get_config(self):
config = super().get_config()
config.update({"batch_size": self.batch_size})
return config | [((428, 490), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['features', 'graph_indices', 'self.batch_size'], {}), '(features, graph_indices, self.batch_size)\n', (448, 490), True, 'import tensorflow as tf\n'), ((624, 651), 'tensorflow.reduce_max', 'tf.reduce_max', (['num_features'], {}), '(num_features)\n', (637, 651), True, 'import tensorflow as tf\n'), ((1034, 1072), 'tensorflow.squeeze', 'tf.squeeze', (['nonempty_examples'], {'axis': '(-1)'}), '(nonempty_examples, axis=-1)\n', (1044, 1072), True, 'import tensorflow as tf\n'), ((1100, 1153), 'tensorflow.gather', 'tf.gather', (['features_padded', 'nonempty_examples'], {'axis': '(0)'}), '(features_padded, nonempty_examples, axis=0)\n', (1109, 1153), True, 'import tensorflow as tf\n'), ((1615, 1677), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['features', 'graph_indices', 'self.batch_size'], {}), '(features, graph_indices, self.batch_size)\n', (1635, 1677), True, 'import tensorflow as tf\n'), ((1811, 1838), 'tensorflow.reduce_max', 'tf.reduce_max', (['num_features'], {}), '(num_features)\n', (1824, 1838), True, 'import tensorflow as tf\n'), ((2202, 2255), 'tensorflow.gather', 'tf.gather', (['features_padded', 'nonempty_examples'], {'axis': '(0)'}), '(features_padded, nonempty_examples, axis=0)\n', (2211, 2255), True, 'import tensorflow as tf\n'), ((2147, 2171), 'tensorflow.unique', 'tf.unique', (['graph_indices'], {}), '(graph_indices)\n', (2156, 2171), True, 'import tensorflow as tf\n'), ((572, 583), 'tensorflow.shape', 'tf.shape', (['f'], {}), '(f)\n', (580, 583), True, 'import tensorflow as tf\n'), ((718, 755), 'tensorflow.pad', 'tf.pad', (['f', '[(0, max_num - n), (0, 0)]'], {}), '(f, [(0, max_num - n), (0, 0)])\n', (724, 755), True, 'import tensorflow as tf\n'), ((961, 999), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['features_padded', '(1, 2)'], {}), '(features_padded, (1, 2))\n', (974, 999), True, 'import tensorflow as tf\n'), ((1759, 1770), 'tensorflow.shape', 'tf.shape', (['f'], {}), '(f)\n', (1767, 1770), True, 'import tensorflow as tf\n'), ((1905, 1942), 'tensorflow.pad', 'tf.pad', (['f', '[(0, max_num - n), (0, 0)]'], {}), '(f, [(0, max_num - n), (0, 0)])\n', (1911, 1942), True, 'import tensorflow as tf\n')] |
khchine5/book | lino_book/projects/min9/settings/memory.py | b6272d33d49d12335d25cf0a2660f7996680b1d1 | from .demo import *
SITE.verbose_name = SITE.verbose_name + " (:memory:)"
# SITE = Site(globals(), title=Site.title+" (:memory:)")
DATABASES['default']['NAME'] = ':memory:'
| [] |
aryamanak10/diner-restaurant-website | reservation/urls.py | 6d2d9de89a73c5535ebf782c4d8bbfc6ca9489fc | from django.urls import path
from . import views
app_name = 'reservation'
urlpatterns = [
path('', views.reserve_table, name = 'reserve_table'),
] | [((96, 147), 'django.urls.path', 'path', (['""""""', 'views.reserve_table'], {'name': '"""reserve_table"""'}), "('', views.reserve_table, name='reserve_table')\n", (100, 147), False, 'from django.urls import path\n')] |
yumetov/chainer | chainer/_version.py | 522e017a18008ee00e39f4ae4b30f4f9db3824b2 | __version__ = '7.8.0'
_optional_dependencies = [
{
'name': 'CuPy',
'packages': [
'cupy-cuda120',
'cupy-cuda114',
'cupy-cuda113',
'cupy-cuda112',
'cupy-cuda111',
'cupy-cuda110',
'cupy-cuda102',
'cupy-cuda101',
'cupy-cuda100',
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
],
'specifier': '>=7.7.0,<8.0.0',
'help': 'https://docs.cupy.dev/en/latest/install.html',
},
{
'name': 'iDeep',
'packages': [
'ideep4py',
],
'specifier': '>=2.0.0.post3, <2.1',
'help': 'https://docs.chainer.org/en/latest/tips.html',
},
]
| [] |
qwerasdf887/image_augmentation | image_aug.py | 7d465eba4d6af5d9a4cd79bf1981c8ef206ffe42 | # coding=UTF-8
# This Python file uses the following encoding: utf-8
import cv2
import numpy as np
import xml.etree.cElementTree as ET
from random import sample
#default args:
default_args = {'noise_prob': 0.1,
'gasuss_mean': 0,
'gasuss_var': 0.001,
'rand_hug': 30,
'rand_saturation':30,
'rand_light': 30,
'rot_angle': 15,
'bordervalue': (127, 127, 127),
'zoom_out_value': 0.7,
'output_shape': (416, 416),
'take_value' : 5
}
# add black (pepper) noise: randomly set pixels to 0
def sp_noise(image, box_loc=None, **kwargs):
h, w = image.shape[0:2]
noise = np.random.rand(h,w)
out_img = image.copy()
out_img[noise < kwargs['noise_prob']] = 0
if box_loc is None:
return out_img
else:
return out_img, box_loc
# Gaussian noise
def gasuss_noise(image, box_loc=None, **kwargs):
out_img = (image / 255.) - 0.5
noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var']** 0.5, image.shape)
out_img = out_img + noise + 0.5
out_img[out_img < 0] = 0
out_img[out_img > 1] = 1
out_img = (out_img * 255).astype(np.uint8)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust hue (add a random value in -N ~ N to the hue channel)
def mod_hue(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug'])
    # clip only the hue channel (OpenCV hue range is 0-180); clipping the whole
    # image here would also cap the saturation/value channels at 180
    out_img[:, :, 0] = np.clip(out_img[:, :, 0], 0, 180)
    out_img = cv2.cvtColor(out_img.astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust saturation (add a random value in -N ~ N to the saturation channel)
def mod_saturation(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation'])
out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# adjust brightness (add a random value in -N ~ N to the value channel)
def mod_light(image, box_loc=None, **kwargs):
out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
out_img[:,:,2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light'])
out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
if box_loc is None:
return out_img
else:
return out_img, box_loc
# horizontal flip
def horizontal_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(x_min, y_min, x_max, y_max)
'''
if box_loc is None:
return cv2.flip(image, 1)
else:
w = image.shape[1]
for i in box_loc:
if i[2] == 0:
break
else:
x_min, x_max = i[0], i[2]
i[0] = w - x_max
i[2] = w - x_min
return cv2.flip(image, 1), box_loc
# vertical flip
def vertical_flip(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
'''
if box_loc is None:
return cv2.flip(image, 0)
else:
h = image.shape[0]
for i in box_loc:
if i[3] == 0:
break
else:
y_min, y_max = i[1], i[3]
i[1] = h - y_max
i[3] = h - y_min
return cv2.flip(image, 0), box_loc
# rotate by a random angle in -n ~ n degrees
def rot_image(image, box_loc=None, **kwargs):
'''
Args:
box_loc: bounding box location(num box,(x_min, y_min, x_max, y_max, label))
        rot_angle: range of rotation angles in degrees (a random angle in -rot_angle ~ rot_angle is used)
        bordervalue: fill value for the border areas exposed by the rotation
'''
h, w, _ = image.shape
center = ( w // 2, h // 2)
angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])
M = cv2.getRotationMatrix2D(center, angle, 1)
out_img = cv2.warpAffine(image, M, (w, h), borderValue = kwargs['bordervalue'])
if box_loc is None:
return out_img
else:
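        # rotate each box by expanding it to its four corner points, applying
        # the rotation matrix, and taking the min/max of the rotated corners
        # as the new axis-aligned box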
loc = box_loc[:,0:4].copy()
loc = np.append(loc, loc[:, 0:1], axis=-1)
loc = np.append(loc, loc[:, 3:4], axis=-1)
loc = np.append(loc, loc[:, 2:3], axis=-1)
loc = np.append(loc, loc[:, 1:2], axis=-1)
loc = loc.reshape(-1, 4, 2)
loc = loc - np.array(center)
rot_loc = loc.dot(np.transpose(M[:,0:2]))
rot_loc = rot_loc + np.array(center)
rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])
rot_box = np.floor(rot_box)
rot_box[...,0:4] = np.clip(rot_box[...,0:4], [0,0,0,0], [w-1, h-1, w-1, h-1])
return out_img, rot_box
# resize the image, keeping its aspect ratio
def resize_img(image, box_loc=None, **kwargs):
h, w, _ = image.shape
max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1])
scale = min( max_edge / h, max_edge / w)
h = int(h * scale)
w = int(w * scale)
if box_loc is None:
return cv2.resize(image, (w, h))
else:
box_loc[:,0] = box_loc[:,0] * scale
box_loc[:,1] = box_loc[:,1] * scale
box_loc[:,2] = box_loc[:,2] * scale
box_loc[:,3] = box_loc[:,3] * scale
return cv2.resize(image, (w, h)), box_loc.astype(np.int32)
# pad the image to the target output size
def padding_img(image, box_loc=None, **kwargs):
h, w, _ = image.shape
dx = int((kwargs['output_shape'][1] - w) / 2)
dy = int((kwargs['output_shape'][0] - h) / 2)
out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0]
out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))
if box_loc is None:
return out_img
else:
box_loc[:,0] = box_loc[:,0] + dx
box_loc[:,1] = box_loc[:,1] + dy
box_loc[:,2] = box_loc[:,2] + dx
box_loc[:,3] = box_loc[:,3] + dy
return out_img, box_loc.astype(np.int32)
# randomly zoom out by a factor between value and 1
def random_zoom_out(image, box_loc=None, **kwargs):
h, w, _ = image.shape
scale = np.random.uniform(kwargs['zoom_out_value'], 1)
h = int(h * scale)
w = int(w * scale)
dx = int((image.shape[1] - w) / 2)
dy = int((image.shape[0] - h) / 2)
out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0]
out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))
if box_loc is None:
return out_img
else:
box_loc[:,0] = box_loc[:,0] * scale + dx
box_loc[:,1] = box_loc[:,1] * scale + dy
box_loc[:,2] = box_loc[:,2] * scale + dx
box_loc[:,3] = box_loc[:,3] * scale + dy
return out_img, box_loc.astype(np.int32)
# load bounding-box annotations from a Pascal VOC style XML file
def load_csv(xml_path, max_boxes=4):
tree = ET.parse(xml_path)
root = tree.getroot()
#location list
loc_list = np.zeros((0, 5))
box_count = 0
for obj in root.iter('object'):
if box_count >= max_boxes:
break
'''
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
'''
loc = obj.find('bndbox')
x_min = int(loc.find('xmin').text)
y_min = int(loc.find('ymin').text)
x_max = int(loc.find('xmax').text)
y_max = int(loc.find('ymax').text)
loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])])
box_count += 1
return loc_list.astype(np.float32)
#draw rectangle
def draw_rect(image, box_loc):
for i in box_loc:
cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4)
def print_args(**kwargs):
for key, value in kwargs.items():
print('key name: {}\nvalue:{}\n'.format(key, value))
# randomly pick 0~N of the image augmentation methods and apply them
def rand_aug_image(image, box_loc=None, **kwargs):
if box_loc is None:
out_img = resize_img(image, **kwargs)
else:
out_img, box_loc = resize_img(image, box_loc, **kwargs)
#total augmentation function
func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light,
horizontal_flip, vertical_flip, rot_image, random_zoom_out]
#rand take function
take_func = sample(func_list, np.random.randint(kwargs['take_value']))
for func in take_func:
if box_loc is None:
out_img = func(out_img, **kwargs)
else:
out_img, box_loc = func(out_img, box_loc, **kwargs)
if box_loc is None:
out_img = padding_img(out_img, **kwargs)
return out_img
else:
out_img, box_loc = padding_img(out_img, box_loc, **kwargs)
return out_img, box_loc
if __name__ == "__main__":
img = cv2.imread('./00002.jpg')
bbox = load_csv('./00002.xml')
    #salt-and-pepper (black speck) noise
#aug_img = sp_noise(img, **default_args)
#aug_img, bbox = sp_noise(img, bbox, **default_args)
#gasuss_noise
#aug_img = gasuss_noise(img, **default_args)
#aug_img, bbox = gasuss_noise(img, bbox, **default_args)
    #adjust hue
#aug_img = mod_hue(img, **default_args)
#aug_img, bbox = mod_hue(img, bbox, **default_args)
    #adjust saturation
#aug_img = mod_saturation(img, **default_args)
#aug_img, bbox = mod_saturation(img, bbox, **default_args)
    #adjust lightness
#aug_img = mod_light(img, **default_args)
#aug_img, bbox = mod_light(img, bbox, **default_args)
    #horizontal flip
#aug_img = horizontal_flip(img, **default_args)
#aug_img, bbox = horizontal_flip(img, bbox, **default_args)
    #vertical flip
#aug_img = vertical_flip(img, **default_args)
#aug_img, bbox = vertical_flip(img, bbox, **default_args)
    #rotate by a random angle
#aug_img = rot_image(img, **default_args)
#aug_img, bbox = rot_image(img, bbox, **default_args)
    #resize proportionally to the target size
#aug_img = resize_img(img, **default_args)
#aug_img, bbox = resize_img(img, bbox, **default_args)
    #pad to the target size
#aug_img = padding_img(aug_img, **default_args)
#aug_img, bbox = padding_img(aug_img, bbox, **default_args)
    #randomly shrink by a factor between N and 1
#aug_img = random_zoom_out(img, **default_args)
#aug_img, bbox = random_zoom_out(img, bbox, **default_args)
    #randomly pick augmentation methods
aug_img = rand_aug_image(img, **default_args)
#aug_img, bbox = rand_aug_image(img, bbox, **default_args)
print(bbox)
draw_rect(aug_img, bbox)
cv2.imshow('img', img)
cv2.imshow('aug img', aug_img)
cv2.waitKey(0)
cv2.destroyAllWindows() | [((700, 720), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (714, 720), True, 'import numpy as np\n'), ((988, 1074), 'numpy.random.normal', 'np.random.normal', (["kwargs['gasuss_mean']", "(kwargs['gasuss_var'] ** 0.5)", 'image.shape'], {}), "(kwargs['gasuss_mean'], kwargs['gasuss_var'] ** 0.5, image.\n shape)\n", (1004, 1074), True, 'import numpy as np\n'), ((1460, 1518), 'numpy.random.randint', 'np.random.randint', (["(-kwargs['rand_hug'])", "kwargs['rand_hug']"], {}), "(-kwargs['rand_hug'], kwargs['rand_hug'])\n", (1477, 1518), True, 'import numpy as np\n'), ((1867, 1939), 'numpy.random.randint', 'np.random.randint', (["(-kwargs['rand_saturation'])", "kwargs['rand_saturation']"], {}), "(-kwargs['rand_saturation'], kwargs['rand_saturation'])\n", (1884, 1939), True, 'import numpy as np\n'), ((2281, 2343), 'numpy.random.randint', 'np.random.randint', (["(-kwargs['rand_light'])", "kwargs['rand_light']"], {}), "(-kwargs['rand_light'], kwargs['rand_light'])\n", (2298, 2343), True, 'import numpy as np\n'), ((3800, 3860), 'numpy.random.randint', 'np.random.randint', (["(-kwargs['rot_angle'])", "kwargs['rot_angle']"], {}), "(-kwargs['rot_angle'], kwargs['rot_angle'])\n", (3817, 3860), True, 'import numpy as np\n'), ((3869, 3910), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'angle', '(1)'], {}), '(center, angle, 1)\n', (3892, 3910), False, 'import cv2\n'), ((3925, 3992), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'borderValue': "kwargs['bordervalue']"}), "(image, M, (w, h), borderValue=kwargs['bordervalue'])\n", (3939, 3992), False, 'import cv2\n'), ((5624, 5649), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (5634, 5649), False, 'import cv2\n'), ((6028, 6074), 'numpy.random.uniform', 'np.random.uniform', (["kwargs['zoom_out_value']", '(1)'], {}), "(kwargs['zoom_out_value'], 1)\n", (6045, 6074), True, 'import numpy as np\n'), ((6309, 6334), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (6319, 6334), False, 'import cv2\n'), ((6703, 6721), 'xml.etree.cElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (6711, 6721), True, 'import xml.etree.cElementTree as ET\n'), ((6783, 6799), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (6791, 6799), True, 'import numpy as np\n'), ((8719, 8744), 'cv2.imread', 'cv2.imread', (['"""./00002.jpg"""'], {}), "('./00002.jpg')\n", (8729, 8744), False, 'import cv2\n'), ((10331, 10353), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (10341, 10353), False, 'import cv2\n'), ((10358, 10388), 'cv2.imshow', 'cv2.imshow', (['"""aug img"""', 'aug_img'], {}), "('aug img', aug_img)\n", (10368, 10388), False, 'import cv2\n'), ((10393, 10407), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (10404, 10407), False, 'import cv2\n'), ((10412, 10435), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10433, 10435), False, 'import cv2\n'), ((2713, 2731), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2721, 2731), False, 'import cv2\n'), ((3218, 3236), 'cv2.flip', 'cv2.flip', (['image', '(0)'], {}), '(image, 0)\n', (3226, 3236), False, 'import cv2\n'), ((4102, 4138), 'numpy.append', 'np.append', (['loc', 'loc[:, 0:1]'], {'axis': '(-1)'}), '(loc, loc[:, 0:1], axis=-1)\n', (4111, 4138), True, 'import numpy as np\n'), ((4153, 4189), 'numpy.append', 'np.append', (['loc', 'loc[:, 3:4]'], {'axis': '(-1)'}), '(loc, loc[:, 3:4], axis=-1)\n', (4162, 
4189), True, 'import numpy as np\n'), ((4204, 4240), 'numpy.append', 'np.append', (['loc', 'loc[:, 2:3]'], {'axis': '(-1)'}), '(loc, loc[:, 2:3], axis=-1)\n', (4213, 4240), True, 'import numpy as np\n'), ((4255, 4291), 'numpy.append', 'np.append', (['loc', 'loc[:, 1:2]'], {'axis': '(-1)'}), '(loc, loc[:, 1:2], axis=-1)\n', (4264, 4291), True, 'import numpy as np\n'), ((4577, 4594), 'numpy.floor', 'np.floor', (['rot_box'], {}), '(rot_box)\n', (4585, 4594), True, 'import numpy as np\n'), ((4622, 4694), 'numpy.clip', 'np.clip', (['rot_box[(...), 0:4]', '[0, 0, 0, 0]', '[w - 1, h - 1, w - 1, h - 1]'], {}), '(rot_box[(...), 0:4], [0, 0, 0, 0], [w - 1, h - 1, w - 1, h - 1])\n', (4629, 4694), True, 'import numpy as np\n'), ((5001, 5026), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (5011, 5026), False, 'import cv2\n'), ((5482, 5558), 'numpy.ones', 'np.ones', (["(kwargs['output_shape'][0], kwargs['output_shape'][1], 3)", 'np.uint8'], {}), "((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8)\n", (5489, 5558), True, 'import numpy as np\n'), ((6213, 6243), 'numpy.ones', 'np.ones', (['image.shape', 'np.uint8'], {}), '(image.shape, np.uint8)\n', (6220, 6243), True, 'import numpy as np\n'), ((8254, 8293), 'numpy.random.randint', 'np.random.randint', (["kwargs['take_value']"], {}), "(kwargs['take_value'])\n", (8271, 8293), True, 'import numpy as np\n'), ((1380, 1418), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1392, 1418), False, 'import cv2\n'), ((1787, 1825), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1799, 1825), False, 'import cv2\n'), ((2201, 2239), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (2213, 2239), False, 'import cv2\n'), ((2984, 3002), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2992, 3002), False, 'import cv2\n'), ((3489, 3507), 'cv2.flip', 'cv2.flip', (['image', '(0)'], {}), '(image, 0)\n', (3497, 3507), False, 'import cv2\n'), ((4348, 4364), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (4356, 4364), True, 'import numpy as np\n'), ((4391, 4414), 'numpy.transpose', 'np.transpose', (['M[:, 0:2]'], {}), '(M[:, 0:2])\n', (4403, 4414), True, 'import numpy as np\n'), ((4443, 4459), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (4451, 4459), True, 'import numpy as np\n'), ((5228, 5253), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (5238, 5253), False, 'import cv2\n'), ((1546, 1570), 'numpy.clip', 'np.clip', (['out_img', '(0)', '(180)'], {}), '(out_img, 0, 180)\n', (1553, 1570), True, 'import numpy as np\n'), ((1967, 1991), 'numpy.clip', 'np.clip', (['out_img', '(0)', '(255)'], {}), '(out_img, 0, 255)\n', (1974, 1991), True, 'import numpy as np\n'), ((2371, 2395), 'numpy.clip', 'np.clip', (['out_img', '(0)', '(255)'], {}), '(out_img, 0, 255)\n', (2378, 2395), True, 'import numpy as np\n'), ((4489, 4513), 'numpy.min', 'np.min', (['rot_loc'], {'axis': '(-2)'}), '(rot_loc, axis=-2)\n', (4495, 4513), True, 'import numpy as np\n'), ((4515, 4539), 'numpy.max', 'np.max', (['rot_loc'], {'axis': '(-2)'}), '(rot_loc, axis=-2)\n', (4521, 4539), True, 'import numpy as np\n'), ((7374, 7415), 'numpy.array', 'np.array', (['[x_min, y_min, x_max, y_max, 0]'], {}), '([x_min, y_min, x_max, y_max, 0])\n', (7382, 7415), True, 'import numpy as np\n')] |
intimanipuchi/tiny_python_projects | 03_picnic/picnic.py | 5e419620ae07b0bcf8df073ba3f6c6c3d7d1a93c | #!/usr/bin/env python3
"""
Author : Roman Koziy <[email protected]>
Date : 2021-12-15
Purpose: Working with lists
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Working with lists",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("items",
type=str,
nargs="+",
metavar="str",
help="item(s) to bring")
parser.add_argument("-s",
"--sorted",
help="a boolean flag",
action="store_true")
return parser.parse_args()
# --------------------------------------------------
def main():
"""The main function: formatting and printing the output"""
args = get_args()
sort_flag = args.sorted
items = args.items
if sort_flag:
items = sorted(items)
if len(items) == 1:
print(f"You are bringing {items[0]}.")
elif len(items) < 3:
items.insert(-1, "and")
print(f"You are bringing {' '.join(items)}.")
else:
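        # three or more items: prefix "and" to the last item and join with
        # commas, e.g. "You are bringing bread, cheese, and wine."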
# print(items)
last = items[-1]
and_last = "and " + last
items[-1] = and_last
# print(items)
print(f"You are bringing {', '.join(items)}.")
# --------------------------------------------------
if __name__ == "__main__":
main()
| [((262, 380), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Working with lists"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Working with lists', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (285, 380), False, 'import argparse\n')] |
montyshyama/python-basics | triangle.py | d71156d70fdadc722a192b984e9bff66401ab894 | side_a=int(input("Enter the first side(a):"))
side_b=int(input("Enter the second side(b):"))
side_c=int(input("Enter the third side(c):"))
if side_a==side_b and side_a==side_c:
print("The triangle is an equilateral triangle.")
elif side_a==side_b or side_a==side_c or side_b==side_c:
print("The triangle is an isosceles triangle.")
else:
    print("The triangle is a scalene triangle.") | []
ktmud/david | david/modules/artist/view.py | 4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f | # -*- coding: utf-8 -*-
from flask import Blueprint, request
from david.lib.template import st
from .model import Artist
bp = Blueprint('artist', __name__)
@bp.app_template_global('artists')
def artists():
return Artist.query.all()
@bp.route('/artist/<uid>/')
def intro(uid):
artist = Artist.get_or_404(uid)
return st('modules/artist/show.html', **locals())
@bp.route('/artist/<uid>/detail')
def detail(uid):
artist = Artist.get_or_404(uid)
return st('modules/artist/detailed.html', **locals())
| [((127, 156), 'flask.Blueprint', 'Blueprint', (['"""artist"""', '__name__'], {}), "('artist', __name__)\n", (136, 156), False, 'from flask import Blueprint, request\n')] |
JessieRamaux/Food-Volume-Estimation | Volume Estimation/volume.py | 260b0e78a3b6a7b8bbe9daf98956502beea92552 | import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
clos = index[:, 1]
left_top_r = np.min(rows)
left_top_c = np.min(clos)
right_bottom_r = np.max(rows)
right_bottom_c = np.max(clos)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
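    # the plate's pixel diameter is estimated as the mean of the bbox width
    # and height; dividing the real plate diameter (cm) by it gives cm per pixel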
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
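    # average the depth values along the plate outline; the gap between that
    # rim level and the reference level spans the known plate depth, which
    # gives a cm-per-depth-unit scale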
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
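    # integrate over every pixel inside the food polygon: each pixel adds
    # (its height above the plate bottom minus the plate thickness) times the
    # real-world area of one pixel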
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
def get_volume(img, json_path):
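    # img is expected to be a depth map read as grayscale; its maximum value is
    # treated as the reference level (table/plate bottom) that heights are measured from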
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json")) | [((2774, 2798), 'cv2.imread', 'cv2.imread', (['"""out.png"""', '(0)'], {}), "('out.png', 0)\n", (2784, 2798), False, 'import cv2\n'), ((302, 337), 'numpy.zeros', 'np.zeros', (['img_shape'], {'dtype': 'np.uint8'}), '(img_shape, dtype=np.uint8)\n', (310, 337), True, 'import numpy as np\n'), ((349, 370), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (364, 370), False, 'from PIL import Image, ImageDraw\n'), ((477, 503), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (485, 503), True, 'import numpy as np\n'), ((554, 576), 'numpy.argwhere', 'np.argwhere', (['(mask == 1)'], {}), '(mask == 1)\n', (565, 576), True, 'import numpy as np\n'), ((640, 652), 'numpy.min', 'np.min', (['rows'], {}), '(rows)\n', (646, 652), True, 'import numpy as np\n'), ((670, 682), 'numpy.min', 'np.min', (['clos'], {}), '(clos)\n', (676, 682), True, 'import numpy as np\n'), ((704, 716), 'numpy.max', 'np.max', (['rows'], {}), '(rows)\n', (710, 716), True, 'import numpy as np\n'), ((738, 750), 'numpy.max', 'np.max', (['clos'], {}), '(clos)\n', (744, 750), True, 'import numpy as np\n'), ((1520, 1536), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1528, 1536), True, 'import numpy as np\n'), ((1945, 1956), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1951, 1956), True, 'import numpy as np\n'), ((2099, 2119), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2108, 2119), False, 'import json\n'), ((411, 431), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (425, 431), False, 'from PIL import Image, ImageDraw\n'), ((1713, 1756), 'cv2.pointPolygonTest', 'cv2.pointPolygonTest', (['points', '(i, j)', '(False)'], {}), '(points, (i, j), False)\n', (1733, 1756), False, 'import cv2\n')] |
tianluyuan/pyutils | t2k/bin/cmttags.py | 2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50 | #!/usr/bin/env python
"""
A script to create tags for CMT managed packages.
Call from within cmt/ directory
"""
import subprocess
import sys
import os
from optparse import OptionParser
__author__ = 'Tianlu Yuan'
__email__ = 'tianlu.yuan [at] colorado.edu'
# Ignore large external packages for now
IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']
# Extensions for finding src files, must satisfy unix wildcard rules
EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'),
              'python': ('*.py',),   # trailing commas keep these as tuples;
              'java': ('*.java',)}   # a bare string would be iterated character by character
# Ignore these files and dirs, key specifies argument to find
# (e.g. '-iname')
PRUNE = {'iname':['*_Dict.[hc]*', '*linkdef.h']}
def check_dir():
""" Are we inside cmt/
"""
if os.path.basename(os.getcwd()) != 'cmt':
sys.exit('Not inside cmt directory!')
def check_requirements():
""" Ensure that requirements file exists in cmt dir
"""
if not os.path.isfile('requirements'):
sys.exit('No requirements file!')
def init_use_dict():
"""Returns the initial use_dict which contains the current (cwd)
package and its path. 'cmt show uses' does not include the
package itself.
"""
# Must call os.path.dirname because the cwd should be inside a cmt
# directory
return {'this':os.path.dirname(os.getcwd())}
def parse_uses():
""" Returns a dict of used packages and their root dir paths.
e.g. {ROOT:/path/to/cmt/installed/ROOT/vXrY}
"""
check_dir()
check_requirements()
proc = subprocess.Popen(['cmt', 'show', 'uses'],
stdout=subprocess.PIPE)
use_dict = init_use_dict()
for line in iter(proc.stdout.readline, ''):
tokens = line.split()
# ignore lines that start with '#'
if line[0] != '#' and tokens[1] not in IGNORES:
basepath = tokens[-1].strip('()')
# highland and psyche do not strictly follow CMT path
# organization. They have subpackages within a master, so
# we need to take that into account
relpath_list = [master for master in tokens[3:-1]]
relpath_list.extend([tokens[1], tokens[2]])
use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)
return use_dict
def get_exts(opts):
if opts.python:
return EXTENSIONS['python']
elif opts.java:
return EXTENSIONS['java']
else:
return EXTENSIONS['cpp']
def build_find_args(exts):
""" ext is a list of file extensions corresponding to the files we want
to search. This will return a list of arguments that can be passed to `find`
"""
find_args = []
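    # builds arguments of the form:
    #   ( -iname EXT1 -o -iname EXT2 ... ) ( -not -iname '*_Dict.[hc]*' -not -iname '*linkdef.h' )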
for a_ext in exts:
# -o for "or"
find_args.extend(['-o', '-iname'])
find_args.append('{0}'.format(a_ext))
    # replace the first '-o' with '(' for grouping matches
find_args[0] = '('
# append parens for grouping negation
find_args.extend([')', '('])
# Add prune files
for match_type in PRUNE:
for aprune in PRUNE[match_type]:
find_args.append('-not')
find_args.append('-'+match_type)
find_args.append('{0}'.format(aprune))
find_args.append(')')
return find_args
def build_find_cmd(opts, paths):
    """ Builds the find command. Returns cmd based on the following
template: 'find {0} -type f {1} | etags -'
"""
find_args = build_find_args(get_exts(opts))
return ['find']+paths+['-type', 'f']+find_args
def build_tags_cmd():
return ['etags', '-']
def main():
    """ Uses etags to generate a TAGS file in the cmt directory based on cmt show uses
"""
parser = OptionParser()
parser.add_option('--cpp',
dest='cpp',
action='store_true',
default=False,
help='tag only c/cpp files (default)')
parser.add_option('--python',
dest='python',
action='store_true',
default=False,
help='tag only python files')
parser.add_option('--java',
dest='java',
action='store_true',
default=False,
help='tag only java files')
parser.add_option('-n',
dest='dry_run',
action='store_true',
default=False,
help='dry run')
(opts, args) = parser.parse_args()
# get the cmt show uses dictionary of programs and paths
use_dict = parse_uses()
# build the commands
find_cmd = build_find_cmd(opts, list(use_dict.itervalues()))
tags_cmd = build_tags_cmd()
print 'Creating TAGS file based on dependencies:'
print use_dict
if not opts.dry_run:
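        # pipe the list of matching source files straight into etags,
        # the equivalent of `find ... | etags -`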
find_proc = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)
tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout)
tags_proc.communicate()
if __name__ == '__main__':
main()
| [] |
rickh563/salt | salt/daemons/masterapi.py | 02822d6466c47d0daafd6e98b4e767a396b0ed48 | # -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import logging
import os
import re
import time
import stat
import tempfile
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
from salt.pillar import git_pillar
from salt.utils.event import tagify
from salt.exceptions import SaltMasterError
# Import 3rd-party libs
import salt.ext.six as six
try:
import pwd
HAS_PWD = True
except ImportError:
# pwd is not available on windows
HAS_PWD = False
log = logging.getLogger(__name__)
# Things to do in lower layers:
# only accept valid minion ids
def init_git_pillar(opts):
'''
Clear out the ext pillar caches, used when the master starts
'''
pillargitfs = []
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
if 'git' in opts_dict:
try:
import git
except ImportError:
return pillargitfs
parts = opts_dict['git'].strip().split()
try:
br = parts[0]
loc = parts[1]
except IndexError:
log.critical(
'Unable to extract external pillar data: {0}'
.format(opts_dict['git'])
)
else:
pillargitfs.append(
git_pillar.GitPillar(
br,
loc,
opts
)
)
return pillargitfs
def clean_fsbackend(opts):
'''
Clean out the old fileserver backends
'''
# Clear remote fileserver backend caches so they get recreated
for backend in ('git', 'hg', 'svn'):
if backend in opts['fileserver_backend']:
env_cache = os.path.join(
opts['cachedir'],
'{0}fs'.format(backend),
'envs.p'
)
if os.path.isfile(env_cache):
log.debug('Clearing {0}fs env cache'.format(backend))
try:
os.remove(env_cache)
except OSError as exc:
log.critical(
'Unable to clear env cache file {0}: {1}'
.format(env_cache, exc)
)
file_lists_dir = os.path.join(
opts['cachedir'],
'file_lists',
'{0}fs'.format(backend)
)
try:
file_lists_caches = os.listdir(file_lists_dir)
except OSError:
continue
for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
cache_file = os.path.join(file_lists_dir, file_lists_cache)
try:
os.remove(cache_file)
except OSError as exc:
log.critical(
'Unable to file_lists cache file {0}: {1}'
.format(cache_file, exc)
)
def clean_expired_tokens(opts):
'''
Clean expired tokens from the master
'''
serializer = salt.payload.Serial(opts)
for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):
for token in filenames:
token_path = os.path.join(dirpath, token)
with salt.utils.fopen(token_path) as token_file:
token_data = serializer.loads(token_file.read())
if 'expire' not in token_data or token_data.get('expire', 0) < time.time():
try:
os.remove(token_path)
except (IOError, OSError):
pass
def clean_pub_auth(opts):
try:
auth_cache = os.path.join(opts['cachedir'], 'publish_auth')
if not os.path.exists(auth_cache):
return
else:
for (dirpath, dirnames, filenames) in os.walk(auth_cache):
for auth_file in filenames:
auth_file_path = os.path.join(dirpath, auth_file)
if not os.path.isfile(auth_file_path):
continue
                    if time.time() - os.path.getmtime(auth_file_path) > opts['keep_jobs'] * 3600:
os.remove(auth_file_path)
except (IOError, OSError):
log.error('Unable to delete pub auth file')
def clean_old_jobs(opts):
'''
Clean out the old jobs from the job cache
'''
# TODO: better way to not require creating the masterminion every time?
mminion = salt.minion.MasterMinion(
opts,
states=False,
rend=False,
)
# If the master job cache has a clean_old_jobs, call it
fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache'])
if fstr in mminion.returners:
mminion.returners[fstr]()
def access_keys(opts):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
users = []
keys = {}
acl_users = set(opts['client_acl'].keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
if HAS_PWD:
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
if HAS_PWD:
if user not in users:
try:
user = pwd.getpwnam(user).pw_name
except KeyError:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
def fileserver_update(fileserver):
'''
Update the fileserver backends, requires that a built fileserver object
be passed in
'''
try:
if not fileserver.servers:
log.error(
'No fileservers loaded, the master will not be able to '
'serve files to minions'
)
raise SaltMasterError('No fileserver backends available')
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc),
exc_info_on_loglevel=logging.DEBUG
)
class AutoKey(object):
'''
Implement the methods to run auto key acceptance and rejection
'''
def __init__(self, opts):
self.opts = opts
def check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(signing_file))
return False
with salt.utils.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
else:
if salt.utils.expr_match(keyid, line):
return True
return False
def check_autosign_dir(self, keyid):
'''
        Check a keyid for membership in an autosign directory.
'''
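        # A key is auto-signed when a stub file named after the minion id
        # exists in pki_dir/minions_autosign and is younger than
        # autosign_expire_minutes; the stub is removed once it has been used.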
autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
# cleanup expired files
expire_minutes = self.opts.get('autosign_expire_minutes', 10)
if expire_minutes > 0:
min_time = time.time() - (60 * int(expire_minutes))
for root, dirs, filenames in os.walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if mtime < min_time:
log.warn('Autosign keyid expired {0}'.format(stub_file))
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
if not os.path.exists(stub_file):
return False
os.remove(stub_file)
return True
def check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
'''
return self.check_signing_file(
keyid,
self.opts.get('autoreject_file', None)
)
def check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
return False
class RemoteFuncs(object):
'''
    Functions made available to minions; this class includes the raw routines,
    run after validation, that make up the minion access to the master
'''
def __init__(self, opts):
self.opts = opts
self.event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.get_local_client(mopts=self.opts)
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion_publish(self, load):
'''
Verify that the passed information authorized a minion to execute
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):
return False
# If the command will make a recursive publish don't run
if re.match('publish.*', load['fun']):
return False
# Check the permissions for this minion
perms = []
for match in self.opts['peer']:
if re.match(match, load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
load['fun'] = load['fun'].split(',')
arg_ = []
for arg in load['arg']:
arg_.append(arg.split())
load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'),
publish_validate=True)
if not good:
return False
return True
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _ext_nodes(self, load, skip_verify=False):
'''
Return the results from an external node classifier if one is
specified
'''
if not skip_verify:
if 'id' not in load:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return {}
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
ret = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
)
return ret
def _mine_get(self, load, skip_verify=False):
'''
Gathers the data from the specified minions' mine
'''
if not skip_verify:
if any(key not in load for key in ('id', 'tgt', 'fun')):
return {}
if 'mine_get' in self.opts:
# If master side acl defined.
if not isinstance(self.opts['mine_get'], dict):
return {}
perms = set()
for match in self.opts['mine_get']:
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if not any(re.match(perm, load['fun']) for perm in perms):
return {}
ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret
match_type = load.get('expr_form', 'glob')
if match_type.lower() == 'pillar':
match_type = 'pillar_exact'
if match_type.lower() == 'compound':
match_type = 'compound_pillar_exact'
checker = salt.utils.minions.CkMinions(self.opts)
minions = checker.check_minions(
load['tgt'],
match_type,
greedy=False
)
for minion in minions:
mine = os.path.join(
self.opts['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine, 'rb') as fp_:
fdata = self.serial.load(fp_).get(load['fun'])
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
def _mine(self, load, skip_verify=False):
'''
Return the mine data
'''
if not skip_verify:
if 'id' not in load or 'data' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'mine.p')
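            # Unless the minion asked for a clear, merge the new mine data
            # into whatever is already cached for this minion.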
if not load.get('clear', False):
if os.path.isfile(datap):
with salt.utils.fopen(datap, 'rb') as fp_:
new = self.serial.load(fp_)
if isinstance(new, dict):
new.update(load['data'])
load['data'] = new
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(load['data']))
return True
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
'''
if 'id' not in load or 'fun' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return False
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
with salt.utils.fopen(datap, 'rb') as fp_:
mine_data = self.serial.load(fp_)
if isinstance(mine_data, dict):
if mine_data.pop(load['fun'], False):
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(mine_data))
except OSError:
return False
return True
def _mine_flush(self, load, skip_verify=False):
'''
Allow the minion to delete all of its own mine contents
'''
if not skip_verify and 'id' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return False
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
os.remove(datap)
except OSError:
return False
return True
def _file_recv(self, load):
'''
        Allows minions to send files to the master; files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
# Normalize Windows paths
normpath = load['path']
if ':' in normpath:
# make sure double backslashes are normalized
normpath = normpath.replace('\\', '/')
normpath = os.path.normpath(normpath)
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
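        # Append when this is a continuation chunk (non-zero offset),
        # otherwise start the cached file from scratch.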
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions,
pillar=load.get('pillar_override', {}))
pillar_dirs = {}
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
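            # Cache the minion's grains and freshly compiled pillar to disk
            # atomically: write to a temp file first, then rename it over data.p.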
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
# On Windows, os.rename will fail if the destination file exists.
salt.utils.atomicfile.atomic_rename(tmpfname, datap)
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load:
return False
if 'events' not in load and ('tag' not in load or 'data' not in load):
return False
if 'events' in load:
for event in load['events']:
self.event.fire_event(event, event['tag']) # old dup event
if load.get('pretag') is not None:
if 'data' in event:
self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag']))
else:
self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# Generate EndTime
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False))
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])
if (self.opts.get('job_cache_store_endtime')
and fstr in self.mminion.returners):
self.mminion.returners[fstr](load['jid'], endtime)
fstr = '{0}.returner'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if 'load' in load:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
def minion_runner(self, load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if any(key not in load for key in ('fun', 'arg', 'id')):
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, load['fun']):
good = True
if not good:
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
# Prepare the runner object
opts = {'fun': load['fun'],
'arg': load['arg'],
'id': load['id'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
def pub_ret(self, load, skip_verify=False):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
'''
if not skip_verify and any(key not in load for key in ('jid', 'id')):
return {}
else:
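            # Only the minion that originally published the job may read its
            # returns; its id was written to the publish_auth file at publish
            # time and must match the id making this request.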
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, load['jid'])
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, load):
'''
        Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(load):
return {}
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'expr_form': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
'id': load['id'],
}
if 'tgt_type' in load:
if load['tgt_type'].startswith('node'):
if load['tgt'] in self.opts['nodegroups']:
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['expr_form_type'] = 'compound'
pub_load['expr_form'] = load['tgt_type']
else:
return {}
else:
pub_load['expr_form'] = load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**pub_load)
ret['minions'] = self.ckminions.check_minions(
load['tgt'],
pub_load['expr_form'])
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(ret['jid']))
with salt.utils.fopen(jid_fn, 'w+') as fp_:
fp_.write(load['id'])
return ret
def minion_publish(self, load):
'''
        Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(load):
return {}
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'expr_form': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
'id': load['id'],
}
if 'tmo' in load:
try:
pub_load['timeout'] = int(load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
load['tmo'])
log.warn(msg)
return {}
if 'timeout' in load:
try:
pub_load['timeout'] = int(load['timeout'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
load['timeout'])
log.warn(msg)
return {}
if 'tgt_type' in load:
if load['tgt_type'].startswith('node'):
if load['tgt'] in self.opts['nodegroups']:
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['expr_form_type'] = 'compound'
else:
return {}
else:
pub_load['expr_form'] = load['tgt_type']
pub_load['raw'] = True
ret = {}
for minion in self.local.cmd_iter(**pub_load):
if load.get('form', '') == 'full':
data = minion
if 'jid' in minion:
ret['__jid__'] = minion['jid']
data['ret'] = data.pop('return')
ret[minion['id']] = data
else:
ret[minion['id']] = minion['return']
if 'jid' in minion:
ret['__jid__'] = minion['jid']
for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
if key not in ret:
ret[key] = val
if load.get('form', '') != 'full':
ret.pop('__jid__')
return ret
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
'''
if 'id' not in load:
return False
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'],
preserve_minions=load.get('preserve_minion_cache',
False))
return True
class LocalFuncs(object):
'''
Set up methods for use only from the local system
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
# Create the event manager
self.event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
# Make a client
self.local = salt.client.get_local_client(mopts=self.opts)
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def runner(self, load):
'''
Send a master control function back to the runner system
'''
if 'token' in load:
try:
token = self.loadauth.get_tok(load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.runner_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
if 'eauth' not in load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(load)
if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.runner_check(
self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
load.get('kwarg', {}),
load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def wheel(self, load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in load:
try:
token = self.loadauth.get_tok(load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
jid = salt.utils.jid.gen_jid()
fun = load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
if 'eauth' not in load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(load)
if not ((name in self.opts['external_auth'][load['eauth']]) |
('*' in self.opts['external_auth'][load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][load['eauth']][name]
if name in self.opts['external_auth'][load['eauth']]
                else self.opts['external_auth'][load['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
jid = salt.utils.jid.gen_jid()
fun = load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def mk_token(self, load):
'''
        Create and return an authentication token; the clear load needs to
contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(load)
if not ((name in self.opts['external_auth'][load['eauth']]) |
('*' in self.opts['external_auth'][load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
def get_token(self, load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in load:
return False
return self.loadauth.get_tok(load['token'])
def publish(self, load):
'''
        This method sends out publications to the minions; it can only be used
by the LocalClient.
'''
extra = load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
# if this is a regular command, its a single function
if isinstance(load['fun'], str):
funs_to_check = [load['fun']]
# if this a compound function
else:
funs_to_check = load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=load['user'],
function=load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred. \
Token could not be retrieved.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred. \
Authentication type of {0} not present.').format(token['eauth'])
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred. \
Token does not verify against eauth provider: {0}').format(
self.opts['external_auth'])
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra)
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']])):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name]
if name in self.opts['external_auth'][extra['eauth']]
else self.opts['external_auth'][extra['eauth']]['*'],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
load['user'] = name
# Verify that the caller has root on master
elif 'user' in load:
if load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if load.get('key', 'invalid') == self.key.get('root'):
load.pop('key')
elif load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == self.opts.get('user', 'root'):
if load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == 'root':
if load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == salt.utils.get_user():
if load.pop('key') != self.key.get(load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if load['user'] in self.key:
# User is authorised, check key and check perms
if load.pop('key') != self.key[load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][load['user']],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if load.pop('key') != self.key[salt.utils.get_user()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
minions = self.ckminions.check_minions(
load['tgt'],
load.get('tgt_type', 'glob')
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
if not load['jid']:
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False))
self.event.fire_event({'minions': minions}, load['jid'])
new_job_load = {
'jid': load['jid'],
'tgt_type': load['tgt_type'],
'tgt': load['tgt'],
'user': load['user'],
'fun': load['fun'],
'arg': load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job'))
# Save the invocation information
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job cache
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'tgt': load['tgt'],
'jid': load['jid'],
'ret': load['ret'],
}
if 'id' in extra:
pub_load['id'] = extra['id']
if 'tgt_type' in load:
pub_load['tgt_type'] = load['tgt_type']
if 'to' in load:
pub_load['to'] = load['to']
if 'kwargs' in load:
if 'ret_config' in load['kwargs']:
pub_load['ret_config'] = load['kwargs'].get('ret_config')
if 'metadata' in load['kwargs']:
pub_load['metadata'] = load['kwargs'].get('metadata')
if 'user' in load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**load
)
)
pub_load['user'] = load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**load
)
)
log.debug('Published command details {0}'.format(pub_load))
return {'ret': {
'jid': load['jid'],
'minions': minions
},
'pub': pub_load
}
| [] |
Mohitbalwani26/oppia | core/domain/role_services_test.py | a3d1de8b428b8216bb61ba70315583fe077f5b8a | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test functions relating to roles and actions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import role_services
from core.tests import test_utils
import feconf
import python_utils
class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase):
"""Tests for roles and actions."""
def test_get_role_actions_return_value_in_correct_schema(self):
role_actions = role_services.get_role_actions()
self.assertTrue(isinstance(role_actions, dict))
for role_name, allotted_actions in role_actions.items():
self.assertTrue(isinstance(role_name, python_utils.UNICODE))
self.assertTrue(isinstance(allotted_actions, list))
self.assertEqual(len(set(allotted_actions)), len(allotted_actions))
for action_name in allotted_actions:
self.assertTrue(
isinstance(action_name, python_utils.UNICODE))
def test_get_all_actions(self):
with self.assertRaisesRegexp(
Exception, 'Role TEST_ROLE does not exist.'):
role_services.get_all_actions('TEST_ROLE')
self.assertEqual(
role_services.get_all_actions(feconf.ROLE_ID_GUEST),
[role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY])
| [((1141, 1173), 'core.domain.role_services.get_role_actions', 'role_services.get_role_actions', ([], {}), '()\n', (1171, 1173), False, 'from core.domain import role_services\n'), ((1807, 1849), 'core.domain.role_services.get_all_actions', 'role_services.get_all_actions', (['"""TEST_ROLE"""'], {}), "('TEST_ROLE')\n", (1836, 1849), False, 'from core.domain import role_services\n'), ((1889, 1940), 'core.domain.role_services.get_all_actions', 'role_services.get_all_actions', (['feconf.ROLE_ID_GUEST'], {}), '(feconf.ROLE_ID_GUEST)\n', (1918, 1940), False, 'from core.domain import role_services\n')] |
xpennec/applications | deep_learning/keras/keras/backend/cntk_backend.py | 50aefdf14de308fc3c132784ebba9d329e47b087 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
        'CNTK\'s CPU version is not fully optimized, '
'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradient as symbolic op, to hook up with keras model,
# we will create gradient as a constant placeholder, here use this global
# map to keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
_LEARNING_PHASE = value
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
    # CNTK currently doesn't support a cond op, so here we use the
    # element_select approach as a workaround. It may have a perf
    # issue; this will be resolved later with a cntk cond op.
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
alt = alt()
if training is True:
x._uses_learning_phase = uses_learning_phase
return x
else:
# if _LEARNING_PHASE is static
if isinstance(training, int) or isinstance(training, bool):
result = x if training == 1 or training is True else alt
else:
result = C.element_select(training, x, alt)
result._uses_learning_phase = uses_learning_phase
return result
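# Illustrative usage sketch (assumed example, not part of the original module):
# in_train_phase picks the first branch while training and the second branch
# while testing, deferring to the learning-phase tensor when the phase is not
# statically known, e.g.:
#
#     dropped = in_train_phase(lambda: dropout(x, 0.5), lambda: x)
#     # behaves like dropout(x, 0.5) when learning_phase() == 1,
#     # and like the identity when learning_phase() == 0.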
def in_test_phase(x, alt, training=None):
return in_train_phase(alt, x, training=training)
def _convert_string_dtype(dtype):
    # cntk only supports float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
        # cntk only runs with float types,
        # so try to cast to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
    # we don't support initializing a parameter with a symbolic op, so eval it
    # first as a workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
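# Illustrative usage sketch (assumed example, not part of the original module):
#
#     import numpy as np
#     v = variable(np.array([[1., 2.], [3., 4.]]), dtype='float32', name='kvar')
#     # v is a CNTK parameter carrying Keras metadata:
#     # int_shape(v) == (2, 2); eval(v) returns the underlying numpy value.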
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
dynamic_axis_num=1):
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
cntk_shape = [dynamic_dimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
                         % (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
x._cntk_placeholder = True
return x
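# Illustrative usage sketch (assumed example, not part of the original module):
#
#     p = placeholder(shape=(None, 32), dtype='float32', name='kph')
#     # p is a CNTK input variable with one dynamic (batch) axis;
#     # int_shape(p) == (None, 32) and is_placeholder(p) is True.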
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
def is_keras_tensor(x):
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (C.variables.Constant,
C.variables.Variable,
C.variables.Parameter,
C.ops.functions.Function))
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = const.shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use numpy workaround now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=None,
name=None,
seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
# how to apply mean and stddev
return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def zeros_like(x, dtype=None, name=None):
return x * 0
def ones_like(x, dtype=None, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod(int_shape(x))
def cast(x, dtype):
# cntk calculate everything in float, so don't need case from bool / int
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if b_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if len(x_shape) == 2 and len(y_shape) == 2:
if axes[0] == axes[1]:
result = sum(x * y, axis=axes[0], keepdims=True)
return result if axes[0] == 1 else transpose(result)
else:
return sum(x * transpose(y), axis=axes[0], keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
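# Illustrative shape sketch (assumed example, not part of the original module):
#
#     x = placeholder(shape=(None, 3, 4))
#     y = placeholder(shape=(None, 4, 5))
#     z = batch_dot(x, y)            # default axes contract 4 with 4 -> (None, 3, 5)
#     w = batch_dot(x, x, axes=2)    # contract the last axes -> (None, 3, 3)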
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
    # There is a bug in the cntk gather op which may cause a crash.
    # We have made a fix, but it did not make it into the CNTK 2.1 release.
    # Will switch to the gather op in the next release.
if _get_cntk_version() >= 2.2:
return C.ops.gather(reference, indices)
else:
num_classes = reference.shape[0]
one_hot_matrix = C.ops.one_hot(indices, num_classes)
return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1)
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# sequence axis is removed by default, so don't need reshape on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
result = C.reshape(x, new_shape)
if index < nones:
result._keras_shape = shape
return result
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = shape[nones:]
new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in new_shape])
return C.reshape(x, new_shape)
def tile(x, n):
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if nones > ndim:
raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '
'%d cntk dynamic axis, this is not expected, please '
'double check the keras shape history.' % (str(shape), nones))
    # Current cntk does not support shapes like (1, batch), so we use a workaround
    # here to map to the correct axis. This trick will be removed once support is
    # added in the native cntk op.
cntk_axis = []
dynamic_axis_index = 0
for i in range(ndim):
if shape[i] is None and dynamic_axis_index < nones:
cntk_axis.append(x.dynamic_axes[dynamic_axis_index])
dynamic_axis_index += 1
else:
cntk_axis.append(i - dynamic_axis_index)
if dynamic_axis_index < nones:
i = 0
while dynamic_axis_index < nones:
cntk_axis[i] = x.dynamic_axes[dynamic_axis_index]
i += 1
dynamic_axis_index += 1
while i < len(cntk_axis):
cntk_axis[i] -= nones
i += 1
if isinstance(axis, tuple):
_axis = list(axis)
elif isinstance(axis, int):
_axis = [axis]
elif isinstance(axis, list):
_axis = list(axis)
else:
_axis = axis
if isinstance(_axis, list):
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
_axis[i] = cntk_axis[_axis[i]]
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
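# Illustrative sketch of the mapping above (assumed example, not part of the
# original module): for a tensor with keras shape (None, 3, 4) the leading None
# is a cntk dynamic (batch) axis, so keras axis 1 maps to cntk static axis 0
# and keras axis 2 maps to cntk static axis 1:
#
#     x = placeholder(shape=(None, 3, 4))
#     _normalize_axis([1, 2], x)    # -> [0, 1] (cntk static axes)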
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(target, output, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(target, output, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, epsilon(), 1.0 - epsilon())
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return int_shape(x)
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
# cntk does not support gradients as symbolic op,
# to hook up with keras model
# we will return a constant as place holder, the cntk learner will apply
# the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
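# Illustrative note (assumed example, not part of the original module): each
# returned gradient is a zero constant named 'keras_grad_placeholder', and
# grad_parameter_dict records which parameter it stands for, so the trainer
# built in Function() can wire the real gradients back in:
#
#     g = gradients(loss, [some_parameter])[0]    # hypothetical loss/parameter
#     grad_parameter_dict[g] is some_parameter    # -> True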
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
if ndim(gamma) > axis:
gamma = C.reduce_mean(gamma, axis - 1)
beta = C.reduce_mean(beta, axis - 1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
def _moments(x, axes=None, shift=None, keep_dims=False):
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
# The mean / var / beta / gamma may be processed by broadcast
# so it may have an extra batch axis with 1, it is not needed
# in cntk, need to remove those dummy axis.
if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
mean = _reshape_dummy_dim(mean, [0])
if ndim(var) == ndim(x) and shape(var)[0] == 1:
var = _reshape_dummy_dim(var, [0])
if gamma is None:
gamma = ones_like(var)
elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
gamma = _reshape_dummy_dim(gamma, [0])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
beta = _reshape_dummy_dim(beta, [0])
return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape])
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return _reshape_batch(x, shape)
else:
# no collapse, then first need to padding the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
if isinstance(pattern, list):
current_layout = [i for i in range(dims)]
else:
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
def resize_images(x, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
def repeat(x, n):
    # This is a workaround for the recurrent layer: if n is an inferred
    # dimension, we can't figure out how to repeat it in cntk yet, so we
    # return x unchanged and rely on cntk's broadcasting to make the
    # recurrent layer work. This needs to be fixed in GA.
if n is C.InferredDimension or n is C.FreeDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
uses_learning_phase = False
if dims < 3:
raise ValueError('Input should be at least 3D.')
    # if the second axis is a static axis, CNTK will do unrolling by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
global uses_learning_phase
uses_learning_phase = False
if dims < 3:
raise ValueError('CNTK Backend: the input of rnn has only rank %d '
'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
initial = []
for s in initial_states:
if _get_dynamic_axis_num(s) == 0:
if hasattr(C, 'to_batch'):
initial.append(C.to_batch(s))
else:
initial.append(C.user_function(ConvertToBatch(s)))
else:
initial.append(s)
need_convert = not has_seq_axis(inputs)
if go_backwards and need_convert is False:
raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with '
'variable-length sequences. Please specify a '
'static length for your sequences.')
rnn_inputs = inputs
if need_convert:
if go_backwards:
rnn_inputs = reverse(rnn_inputs, 1)
rnn_inputs = C.to_sequence(rnn_inputs)
rnn_constants = []
for constant in constants:
if isinstance(constant, list):
new_c = []
for c in constant:
if _get_dynamic_axis_num(c) == 1:
new_c.append(C.sequence.broadcast_as(c, rnn_inputs))
else:
new_c.append(c)
rnn_constants.append(new_c)
else:
if _get_dynamic_axis_num(constant) == 1:
rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))
else:
rnn_constants.append(constant)
else:
rnn_constants = constants
if mask is not None and not has_seq_axis(mask):
if go_backwards:
mask = reverse(mask, 1)
if len(int_shape(mask)) == 2:
mask = expand_dims(mask)
mask = C.to_sequence_like(mask, rnn_inputs)
states = tuple(initial)
with C.default_options(axis_offset=1):
def _recurrence(x, states, m):
# create place holder
place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(C.sequence.past_value(p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(rnn_constants))
if getattr(new_output, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
if m is not None:
new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(rnn_inputs, states, mask)
last_output = C.sequence.last(final_output)
last_states = [C.sequence.last(s) for s in final_states]
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
f_stats = []
for l_s, i_s in zip(last_states, initial_states):
if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1:
if hasattr(C, 'unpack_batch'):
f_stats.append(C.unpack_batch(l_s))
else:
f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0])))
else:
f_stats.append(l_s)
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, f_stats
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis=None):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
return _postprocess_conv2d_output(x, data_format)
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding],
groups=x.shape[0])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in keras2, we need to handle the output shape in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[3]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
shape[3] = output_shape[2]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
strides = strides
pool_size = pool_size
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
if level < 0. or level >= 1:
raise ValueError('CNTK Backend: Invalid dropout level %s, '
'must be in interval [0, 1].' % level)
return C.dropout(x, level)
def batch_flatten(x):
    # cntk's batch axis is not part of the shape,
    # so just flatten all the dims in x.shape
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
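# Illustrative shape sketch (assumed example, not part of the original module):
#
#     x = placeholder(shape=(None, 2, 3))
#     y = batch_flatten(x)
#     # y._keras_shape == (None, 6): all non-batch dims are collapsed into one.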
def softmax(x, axis=-1):
return C.softmax(x, axis=axis)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(target, output, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
# cntk's result shape is (batch, 1), while keras expect (batch, )
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with epsilon clipping
output = C.clip(output, epsilon(), 1.0 - epsilon())
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(target, output, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(target, output, from_logits)
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
# need group update by gradient place holder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError(
'CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
        # cntk's trainer can only handle a loss and 1 metric; for more than
        # 2 outputs, the extra metrics need to be evaluated manually
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
@staticmethod
def _is_input_shape_compatible(input, placeholder):
if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):
num_dynamic = get_num_dynamic_axis(placeholder)
input_shape = input.shape[num_dynamic:]
placeholder_shape = placeholder.shape
for i, p in zip(input_shape, placeholder_shape):
if i != p and p != C.InferredDimension and p != C.FreeDimension:
return False
return True
def __call__(self, inputs):
global _LEARNING_PHASE_PLACEHOLDER
global _LEARNING_PHASE
assert isinstance(inputs, (list, tuple))
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
# cntk only support calculate on float, do auto cast here
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
if tensor == _LEARNING_PHASE_PLACEHOLDER:
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value)
else:
# in current version cntk can't support input with variable
# length. Will support it in next release.
if not self._is_input_shape_compatible(value, tensor):
raise ValueError('CNTK backend: The placeholder has been resolved '
'to shape `%s`, but input shape is `%s`. Currently '
'CNTK can not take variable length inputs. Please '
'pass inputs that have a static shape.'
% (str(tensor.shape), str(value.shape)))
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
assert(len(result) == 2)
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
# Some ops (like dropout) won't be applied during "eval" in cntk.
            # They are only evaluated in the training phase. To make it work, call
            # the "forward" method to let CNTK know we want to evaluate them.
# But the assign ops won't be executed under this mode, that's why
# we need this check.
if (self.unrelated_updates is None and
(_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)):
_, output_values = self.metrics_func.forward(
input_dict,
self.metrics_func.outputs,
(self.metrics_func.outputs[0],),
as_numpy=False)
else:
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
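# Illustrative usage sketch (not part of the original backend; names are
# hypothetical): Keras builds its train/test/predict functions through this
# wrapper. Assuming `x` and `y` are placeholders and `loss`/`acc` are CNTK
# graph outputs:
#   f = function([x, y], [loss, acc], updates=optimizer_updates)
#   loss_value, acc_value = f([x_batch, y_batch])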
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[padding, (0, 0)])
else:
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[(0, 0), padding, (0, 0)])
else:
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
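# Shape example (illustrative): for a tensor with one dynamic (batch) axis and
# static shape (10, 3), temporal_padding(x, (1, 2)) pads the first static axis
# and yields static shape (13, 3); _padding realises this by splicing
# zero-valued constants of shape (1, 3) before and (2, 3) after `x`.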
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
return C.one_hot(indices, num_classes)
def get_value(x):
if isinstance(
x,
C.variables.Parameter) or isinstance(
x,
C.variables.Constant):
return x.value
else:
return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, (float, int)):
value = np.full(x.shape, value, dtype=floatx())
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
if isinstance(value, np.ndarray) is False:
value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
if isinstance(variables, (list, tuple)):
return map(C.stop_gradient, variables)
else:
return C.stop_gradient(variables)
def switch(condition, then_expression, else_expression):
ndim_cond = ndim(condition)
ndim_expr = ndim(then_expression)
if ndim_cond > ndim_expr:
raise ValueError('Rank of condition should be less'
' than or equal to rank of then and'
' else expressions. ndim(condition)=' +
str(ndim_cond) + ', ndim(then_expression)'
'=' + str(ndim_expr))
elif ndim_cond < ndim_expr:
shape_expr = int_shape(then_expression)
ndim_diff = ndim_expr - ndim_cond
for i in range(ndim_diff):
condition = expand_dims(condition)
condition = tile(condition, shape_expr[ndim_cond + i])
return C.element_select(condition,
then_expression,
else_expression)
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in Keras 2, the output shape needs to be handled in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
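# Shape-handling example (illustrative): with data_format='channels_last' and
# output_shape=(None, 32, 32, 16), the batch axis is dropped and the remaining
# (32, 32, 16) is reordered to channels-first (16, 32, 32) before being passed
# to C.convolution_transpose.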
def identity(x, name=None):
if name is None:
name = '%s_alias' % x.name
return C.alias(x, name=name)
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
# on the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) \
and a != C.Axis.default_batch_axis() \
and hasattr(C.sequence, reduce_fun_name):
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to output_filters first, to apply broadcast
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
def reverse(x, axes):
if isinstance(axes, int):
axes = [axes]
cntk_axes = _normalize_axis(axes, x)
begin_index = [0 for _ in cntk_axes]
end_index = [0 for _ in cntk_axes]
strides = [-1 for _ in cntk_axes]
return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
# there is a bug in cntk 2.1's unpack_batch implementation
if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
const_a = C.unpack_batch(x)
const_a = C.reshape(const_a, shape)
return C.to_batch(const_a)
else:
return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
version = C.__version__
if version.endswith('+'):
version = version[:-1]
    # for hotfix versions, ignore every '.' except the first one.
if len(version) > 2 and version[1] == '.':
version = version[:2] + version[2:].replace('.', '')
try:
return float(version)
except:
warnings.warn(
'CNTK backend warning: CNTK version not detected. '
            'Will use CNTK 2.0 GA as default.')
return float(2.0)
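# Parsing example (derived from the logic above): '2.3.1+' first loses the
# trailing '+', then every '.' except the first is dropped, giving '2.31' and
# hence the float 2.31; a plain '2.0' parses directly to 2.0.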
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK batch axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk variable (parameter/constant)
name: name of this node
"""
def __init__(self, input, name='convert_to_batch'):
super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.inputs[0].shape[1:],
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name)
self.target_shape = (batch_size,) + input.shape
def infer_outputs(self):
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
| [((313, 349), 'cntk.set_global_option', 'C.set_global_option', (['"""align_axis"""', '(1)'], {}), "('align_axis', 1)\n", (332, 349), True, 'import cntk as C\n'), ((371, 400), 'cntk.device.use_default_device', 'C.device.use_default_device', ([], {}), '()\n', (398, 400), True, 'import cntk as C\n'), ((854, 933), 'cntk.constant', 'C.constant', ([], {'shape': '()', 'dtype': 'np.float32', 'value': '(1.0)', 'name': '"""_keras_learning_phase"""'}), "(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')\n", (864, 933), True, 'import cntk as C\n'), ((1069, 1085), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1080, 1085), False, 'from collections import defaultdict\n'), ((425, 583), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."""'], {}), '(\n "CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."\n )\n', (438, 583), False, 'import warnings\n'), ((2269, 2284), 'numpy.asarray', 'np.asarray', (['(1.0)'], {}), '(1.0)\n', (2279, 2284), True, 'import numpy as np\n'), ((17501, 17520), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (17511, 17520), True, 'import cntk as C\n'), ((19618, 19633), 'cntk.square', 'C.square', (['(x - m)'], {}), '(x - m)\n', (19626, 19633), True, 'import cntk as C\n'), ((20103, 20126), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20112, 20126), True, 'import cntk as C\n'), ((20778, 20801), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20787, 20801), True, 'import cntk as C\n'), ((25068, 25097), 'cntk.ops.argmax', 'C.ops.argmax', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25080, 25097), True, 'import cntk as C\n'), ((25235, 25264), 'cntk.ops.argmin', 'C.ops.argmin', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25247, 25264), True, 'import cntk as C\n'), ((25337, 25348), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (25345, 25348), True, 'import cntk as C\n'), ((25374, 25382), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25379, 25382), True, 'import cntk as C\n'), ((25409, 25418), 'cntk.sqrt', 'C.sqrt', (['x'], {}), '(x)\n', (25415, 25418), True, 'import cntk as C\n'), ((25444, 25452), 'cntk.exp', 'C.exp', (['x'], {}), '(x)\n', (25449, 25452), True, 'import cntk as C\n'), ((25478, 25486), 'cntk.log', 'C.log', (['x'], {}), '(x)\n', (25483, 25486), True, 'import cntk as C\n'), ((25514, 25524), 'cntk.round', 'C.round', (['x'], {}), '(x)\n', (25521, 25524), True, 'import cntk as C\n'), ((25554, 25566), 'cntk.sigmoid', 'C.sigmoid', (['x'], {}), '(x)\n', (25563, 25566), True, 'import cntk as C\n'), ((25634, 25645), 'cntk.pow', 'C.pow', (['x', 'a'], {}), '(x, a)\n', (25639, 25645), True, 'import cntk as C\n'), ((25887, 25918), 'cntk.clip', 'C.clip', (['x', 'min_value', 'max_value'], {}), '(x, min_value, max_value)\n', (25893, 25918), True, 'import cntk as C\n'), ((26274, 26292), 'cntk.assign', 'C.assign', (['x', 'new_x'], {}), '(x, new_x)\n', (26282, 26292), True, 'import cntk as C\n'), ((26360, 26426), 'cntk.assign', 'C.assign', (['variable', '(variable * momentum + value * (1.0 - momentum))'], {}), '(variable, variable * momentum + value * (1.0 - momentum))\n', (26368, 26426), True, 'import cntk as C\n'), ((26496, 26515), 'cntk.assign', 'C.assign', (['x', 'result'], {}), '(x, result)\n', (26504, 26515), True, 'import cntk as C\n'), 
((27076, 27089), 'cntk.equal', 'C.equal', (['x', 'y'], {}), '(x, y)\n', (27083, 27089), True, 'import cntk as C\n'), ((27124, 27141), 'cntk.not_equal', 'C.not_equal', (['x', 'y'], {}), '(x, y)\n', (27135, 27141), True, 'import cntk as C\n'), ((27174, 27189), 'cntk.greater', 'C.greater', (['x', 'y'], {}), '(x, y)\n', (27183, 27189), True, 'import cntk as C\n'), ((27228, 27249), 'cntk.greater_equal', 'C.greater_equal', (['x', 'y'], {}), '(x, y)\n', (27243, 27249), True, 'import cntk as C\n'), ((27279, 27291), 'cntk.less', 'C.less', (['x', 'y'], {}), '(x, y)\n', (27285, 27291), True, 'import cntk as C\n'), ((27327, 27345), 'cntk.less_equal', 'C.less_equal', (['x', 'y'], {}), '(x, y)\n', (27339, 27345), True, 'import cntk as C\n'), ((27378, 27397), 'cntk.element_max', 'C.element_max', (['x', 'y'], {}), '(x, y)\n', (27391, 27397), True, 'import cntk as C\n'), ((27430, 27449), 'cntk.element_min', 'C.element_min', (['x', 'y'], {}), '(x, y)\n', (27443, 27449), True, 'import cntk as C\n'), ((27475, 27483), 'cntk.sin', 'C.sin', (['x'], {}), '(x)\n', (27480, 27483), True, 'import cntk as C\n'), ((27509, 27517), 'cntk.cos', 'C.cos', (['x'], {}), '(x)\n', (27514, 27517), True, 'import cntk as C\n'), ((29326, 29348), 'cntk.stop_gradient', 'C.stop_gradient', (['shift'], {}), '(shift)\n', (29341, 29348), True, 'import cntk as C\n'), ((29368, 29385), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29375, 29385), True, 'import cntk as C\n'), ((29681, 29708), 'cntk.plus', 'C.plus', (['shifted_mean', 'shift'], {}), '(shifted_mean, shift)\n', (29687, 29708), True, 'import cntk as C\n'), ((30817, 30849), 'cntk.splice', 'C.splice', (['*tensors'], {'axis': 'axis[0]'}), '(*tensors, axis=axis[0])\n', (30825, 30849), True, 'import cntk as C\n'), ((33044, 33064), 'cntk.transpose', 'C.transpose', (['x', 'axis'], {}), '(x, axis)\n', (33055, 33064), True, 'import cntk as C\n'), ((34544, 34572), 'cntk.splice', 'C.splice', (['*slices'], {'axis': 'axis'}), '(*slices, axis=axis)\n', (34552, 34572), True, 'import cntk as C\n'), ((35137, 35160), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (35146, 35160), True, 'import cntk as C\n'), ((35191, 35218), 'cntk.splice', 'C.splice', (['*temp'], {'axis': 'index'}), '(*temp, axis=index)\n', (35199, 35218), True, 'import cntk as C\n'), ((35245, 35254), 'cntk.tanh', 'C.tanh', (['x'], {}), '(x)\n', (35251, 35254), True, 'import cntk as C\n'), ((44060, 44079), 'cntk.clip', 'C.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (44066, 44079), True, 'import cntk as C\n'), ((50105, 50191), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding,\n padding])\n', (50118, 50191), True, 'import cntk as C\n'), ((51244, 51367), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding, padding], output_shape=output_shape)\n', (51267, 51367), True, 'import cntk as C\n'), ((53443, 53452), 'cntk.relu', 'C.relu', (['x'], {}), '(x)\n', (53449, 53452), True, 'import cntk as C\n'), ((53819, 53838), 'cntk.dropout', 'C.dropout', (['x', 'level'], {}), '(x, level)\n', (53828, 53838), True, 'import cntk as C\n'), ((53959, 53975), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (53966, 53975), True, 'import numpy as np\n'), ((53984, 54003), 
'cntk.reshape', 'C.reshape', (['x', '(-1,)'], {}), '(x, (-1,))\n', (53993, 54003), True, 'import cntk as C\n'), ((54088, 54111), 'cntk.softmax', 'C.softmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (54097, 54111), True, 'import cntk as C\n'), ((54142, 54155), 'cntk.softplus', 'C.softplus', (['x'], {}), '(x)\n', (54152, 54155), True, 'import cntk as C\n'), ((54854, 54889), 'cntk.one_hot', 'C.one_hot', (['target', 'output.shape[-1]'], {}), '(target, output.shape[-1])\n', (54863, 54889), True, 'import cntk as C\n'), ((54903, 54934), 'cntk.reshape', 'C.reshape', (['target', 'output.shape'], {}), '(target, output.shape)\n', (54912, 54934), True, 'import cntk as C\n'), ((67909, 67940), 'cntk.one_hot', 'C.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (67918, 67940), True, 'import cntk as C\n'), ((70112, 70173), 'cntk.element_select', 'C.element_select', (['condition', 'then_expression', 'else_expression'], {}), '(condition, then_expression, else_expression)\n', (70128, 70173), True, 'import cntk as C\n'), ((70264, 70272), 'cntk.elu', 'C.elu', (['x'], {}), '(x)\n', (70269, 70272), True, 'import cntk as C\n'), ((70444, 70485), 'cntk.one_hot', 'C.one_hot', (['targets', 'predictions.shape[-1]'], {}), '(targets, predictions.shape[-1])\n', (70453, 70485), True, 'import cntk as C\n'), ((70499, 70552), 'cntk.classification_error', 'C.classification_error', (['predictions', '_targets'], {'topN': 'k'}), '(predictions, _targets, topN=k)\n', (70521, 70552), True, 'import cntk as C\n'), ((71478, 71592), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding], output_shape=output_shape)\n', (71501, 71592), True, 'import cntk as C\n'), ((71818, 71839), 'cntk.alias', 'C.alias', (['x'], {'name': 'name'}), '(x, name=name)\n', (71825, 71839), True, 'import cntk as C\n'), ((72461, 72494), 'cntk.transpose', 'C.transpose', (['kernel', '(3, 2, 0, 1)'], {}), '(kernel, (3, 2, 0, 1))\n', (72472, 72494), True, 'import cntk as C\n'), ((73365, 73401), 'cntk.transpose', 'C.transpose', (['kernel', '(4, 3, 0, 1, 2)'], {}), '(kernel, (4, 3, 0, 1, 2))\n', (73376, 73401), True, 'import cntk as C\n'), ((77621, 77675), 'cntk.slice', 'C.slice', (['x', 'cntk_axes', 'begin_index', 'end_index', 'strides'], {}), '(x, cntk_axes, begin_index, end_index, strides)\n', (77628, 77675), True, 'import cntk as C\n'), ((10726, 10740), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (10733, 10740), True, 'import numpy as np\n'), ((11156, 11186), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (11173, 11186), True, 'import numpy as np\n'), ((11188, 11208), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11202, 11208), True, 'import numpy as np\n'), ((12455, 12481), 'numpy.random.randint', 'np.random.randint', (['(10000.0)'], {}), '(10000.0)\n', (12472, 12481), True, 'import numpy as np\n'), ((13139, 13169), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (13156, 13169), True, 'import numpy as np\n'), ((14113, 14145), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (14130, 14145), True, 'import numpy as np\n'), ((14939, 14951), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (14945, 14951), True, 'import numpy as np\n'), ((15954, 15967), 'cntk.times', 'C.times', (['x', 'y'], {}), '(x, 
y)\n', (15961, 15967), True, 'import cntk as C\n'), ((17778, 17810), 'cntk.ops.gather', 'C.ops.gather', (['reference', 'indices'], {}), '(reference, indices)\n', (17790, 17810), True, 'import cntk as C\n'), ((17887, 17922), 'cntk.ops.one_hot', 'C.ops.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (17900, 17922), True, 'import cntk as C\n'), ((23765, 23784), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (23774, 23784), True, 'import cntk as C\n'), ((24272, 24296), 'cntk.reduce_sum', 'C.reduce_sum', (['any_matrix'], {}), '(any_matrix)\n', (24284, 24296), True, 'import cntk as C\n'), ((24641, 24665), 'cntk.reduce_sum', 'C.reduce_sum', (['all_matrix'], {}), '(all_matrix)\n', (24653, 24665), True, 'import cntk as C\n'), ((25597, 25605), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25602, 25605), True, 'import cntk as C\n'), ((26018, 26035), 'cntk.sigmoid', 'C.sigmoid', (['output'], {}), '(output)\n', (26027, 26035), True, 'import cntk as C\n'), ((26910, 26969), 'cntk.constant', 'C.constant', (['(0)'], {'shape': 'v.shape', 'name': '"""keras_grad_placeholder"""'}), "(0, shape=v.shape, name='keras_grad_placeholder')\n", (26920, 26969), True, 'import cntk as C\n'), ((28608, 28637), 'cntk.reshape', 'C.reshape', (['mean', 'target_shape'], {}), '(mean, target_shape)\n', (28617, 28637), True, 'import cntk as C\n'), ((28662, 28694), 'cntk.reshape', 'C.reshape', (['variant', 'target_shape'], {}), '(variant, target_shape)\n', (28671, 28694), True, 'import cntk as C\n'), ((28721, 28751), 'cntk.reshape', 'C.reshape', (['gamma', 'target_shape'], {}), '(gamma, target_shape)\n', (28730, 28751), True, 'import cntk as C\n'), ((28777, 28806), 'cntk.reshape', 'C.reshape', (['beta', 'target_shape'], {}), '(beta, target_shape)\n', (28786, 28806), True, 'import cntk as C\n'), ((29432, 29470), 'cntk.reduce_mean', 'C.reduce_mean', (['shifted_mean'], {'axis': 'axis'}), '(shifted_mean, axis=axis)\n', (29445, 29470), True, 'import cntk as C\n'), ((29501, 29518), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29508, 29518), True, 'import cntk as C\n'), ((29567, 29606), 'cntk.reduce_mean', 'C.reduce_mean', (['variance_mean'], {'axis': 'axis'}), '(variance_mean, axis=axis)\n', (29580, 29606), True, 'import cntk as C\n'), ((29646, 29668), 'cntk.square', 'C.square', (['shifted_mean'], {}), '(shifted_mean)\n', (29654, 29668), True, 'import cntk as C\n'), ((31069, 31088), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (31078, 31088), True, 'import cntk as C\n'), ((34427, 34457), 'cntk.ops.slice', 'C.ops.slice', (['x', 'axis', 'i', '(i + 1)'], {}), '(x, axis, i, i + 1)\n', (34438, 34457), True, 'import cntk as C\n'), ((38958, 39010), 'cntk.splice', 'C.splice', (['final_output', 'output_slice'], {'axis': 'time_axis'}), '(final_output, output_slice, axis=time_axis)\n', (38966, 39010), True, 'import cntk as C\n'), ((40814, 40839), 'cntk.to_sequence', 'C.to_sequence', (['rnn_inputs'], {}), '(rnn_inputs)\n', (40827, 40839), True, 'import cntk as C\n'), ((41733, 41769), 'cntk.to_sequence_like', 'C.to_sequence_like', (['mask', 'rnn_inputs'], {}), '(mask, rnn_inputs)\n', (41751, 41769), True, 'import cntk as C\n'), ((41809, 41841), 'cntk.default_options', 'C.default_options', ([], {'axis_offset': '(1)'}), '(axis_offset=1)\n', (41826, 41841), True, 'import cntk as C\n'), ((42888, 42917), 'cntk.sequence.last', 'C.sequence.last', (['final_output'], {}), '(final_output)\n', (42903, 42917), True, 'import cntk as C\n'), ((43028, 43083), 'cntk.sequence.unpack', 
'C.sequence.unpack', (['final_output', '(0)'], {'no_mask_output': '(True)'}), '(final_output, 0, no_mask_output=True)\n', (43045, 43083), True, 'import cntk as C\n'), ((44644, 44663), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44654, 44663), True, 'import cntk as C\n'), ((44681, 44705), 'cntk.swapaxes', 'C.swapaxes', (['kernel', '(0)', '(2)'], {}), '(kernel, 0, 2)\n', (44691, 44705), True, 'import cntk as C\n'), ((44976, 44995), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44986, 44995), True, 'import cntk as C\n'), ((45553, 45626), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding])\n', (45566, 45626), True, 'import cntk as C\n'), ((45875, 45969), 'cntk.convolution', 'C.convolution', (['kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides=dilation_rate[0], auto_padding=[False,\n padding, padding])\n', (45888, 45969), True, 'import cntk as C\n'), ((46816, 46859), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (46827, 46859), True, 'import cntk as C\n'), ((47137, 47251), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (47150, 47251), True, 'import cntk as C\n'), ((47338, 47413), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47351, 47413), True, 'import cntk as C\n'), ((47760, 47865), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding])\n', (47773, 47865), True, 'import cntk as C\n'), ((47925, 48000), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47938, 48000), True, 'import cntk as C\n'), ((48610, 48653), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (48621, 48653), True, 'import cntk as C\n'), ((48850, 48964), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (48863, 48964), True, 'import cntk as C\n'), ((49333, 49457), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding], groups=x.shape[0])\n', (49346, 49457), True, 'import cntk as C\n'), ((52011, 52082), 'cntk.pooling', 'C.pooling', (['x', 'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52020, 52082), True, 'import cntk as C\n'), ((52899, 52970), 'cntk.pooling', 'C.pooling', (['x', 
'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52908, 52970), True, 'import cntk as C\n'), ((53424, 53434), 'cntk.relu', 'C.relu', (['(-x)'], {}), '(-x)\n', (53430, 53434), True, 'import cntk as C\n'), ((53495, 53520), 'cntk.clip', 'C.clip', (['x', '(0.0)', 'max_value'], {}), '(x, 0.0, max_value)\n', (53501, 53520), True, 'import cntk as C\n'), ((54309, 54353), 'cntk.cross_entropy_with_softmax', 'C.cross_entropy_with_softmax', (['output', 'target'], {}), '(output, target)\n', (54337, 54353), True, 'import cntk as C\n'), ((54443, 54464), 'cntk.reshape', 'C.reshape', (['result', '()'], {}), '(result, ())\n', (54452, 54464), True, 'import cntk as C\n'), ((54564, 54593), 'cntk.reduce_sum', 'C.reduce_sum', (['output'], {'axis': '(-1)'}), '(output, axis=-1)\n', (54576, 54593), True, 'import cntk as C\n'), ((69342, 69368), 'cntk.stop_gradient', 'C.stop_gradient', (['variables'], {}), '(variables)\n', (69357, 69368), True, 'import cntk as C\n'), ((70568, 70595), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()'}), '(result, shape=())\n', (70577, 70595), True, 'import cntk as C\n'), ((72154, 72179), 'cntk.transpose', 'C.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (72165, 72179), True, 'import cntk as C\n'), ((72850, 72875), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 0)'], {}), '(x, (1, 2, 0))\n', (72861, 72875), True, 'import cntk as C\n'), ((73255, 73283), 'cntk.transpose', 'C.transpose', (['x', '(3, 0, 1, 2)'], {}), '(x, (3, 0, 1, 2))\n', (73266, 73283), True, 'import cntk as C\n'), ((73523, 73551), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 3, 0)'], {}), '(x, (1, 2, 3, 0))\n', (73534, 73551), True, 'import cntk as C\n'), ((77855, 77872), 'cntk.unpack_batch', 'C.unpack_batch', (['x'], {}), '(x)\n', (77869, 77872), True, 'import cntk as C\n'), ((77891, 77916), 'cntk.reshape', 'C.reshape', (['const_a', 'shape'], {}), '(const_a, shape)\n', (77900, 77916), True, 'import cntk as C\n'), ((77932, 77951), 'cntk.to_batch', 'C.to_batch', (['const_a'], {}), '(const_a)\n', (77942, 77951), True, 'import cntk as C\n'), ((78836, 78863), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (78861, 78863), True, 'import cntk as C\n'), ((80338, 80365), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (80363, 80365), True, 'import cntk as C\n'), ((3166, 3200), 'cntk.element_select', 'C.element_select', (['training', 'x', 'alt'], {}), '(training, x, alt)\n', (3182, 3200), True, 'import cntk as C\n'), ((12703, 12742), 'cntk.initializer.uniform', 'C.initializer.uniform', (['scale'], {'seed': 'seed'}), '(scale, seed=seed)\n', (12724, 12742), True, 'import cntk as C\n'), ((13366, 13410), 'cntk.initializer.normal', 'C.initializer.normal', ([], {'scale': 'scale', 'seed': 'seed'}), '(scale=scale, seed=seed)\n', (13386, 13410), True, 'import cntk as C\n'), ((14289, 14338), 'cntk.initializer.truncated_normal', 'C.initializer.truncated_normal', (['stddev'], {'seed': 'seed'}), '(stddev, seed=seed)\n', (14319, 14338), True, 'import cntk as C\n'), ((14581, 14603), 'numpy.zeros', 'np.zeros', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14589, 14603), True, 'import numpy as np\n'), ((14785, 14806), 'numpy.ones', 'np.ones', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14792, 14806), True, 'import numpy as np\n'), ((15849, 15881), 'cntk.transpose', 'C.transpose', (['y'], {'perm': 'permutation'}), '(y, perm=permutation)\n', (15860, 15881), True, 
'import cntk as C\n'), ((17108, 17131), 'cntk.swapaxes', 'C.swapaxes', (['x', 'i', '(i + 1)'], {}), '(x, i, i + 1)\n', (17118, 17131), True, 'import cntk as C\n'), ((17219, 17242), 'cntk.swapaxes', 'C.swapaxes', (['y', 'i', '(i - 1)'], {}), '(y, i, i - 1)\n', (17229, 17242), True, 'import cntk as C\n'), ((21328, 21369), 'cntk.splice', 'C.splice', (['*tmp'], {'axis': '(i - num_dynamic_axis)'}), '(*tmp, axis=i - num_dynamic_axis)\n', (21336, 21369), True, 'import cntk as C\n'), ((23066, 23083), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (23081, 23083), True, 'import cntk as C\n'), ((23395, 23460), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()', 'begin_axis': 'index', 'end_axis': '(index + 1)'}), '(result, shape=(), begin_axis=index, end_axis=index + 1)\n', (23404, 23460), True, 'import cntk as C\n'), ((24956, 24973), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (24971, 24973), True, 'import cntk as C\n'), ((26115, 26128), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (26120, 26128), True, 'import cntk as C\n'), ((26148, 26167), 'cntk.log', 'C.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (26153, 26167), True, 'import cntk as C\n'), ((29281, 29312), 'cntk.reduce_mean', 'C.reduce_mean', (['shift'], {'axis': 'axis'}), '(shift, axis=axis)\n', (29294, 29312), True, 'import cntk as C\n'), ((32296, 32319), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (32305, 32319), True, 'import cntk as C\n'), ((36429, 36469), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (36440, 36469), True, 'import cntk as C\n'), ((37602, 37642), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (37613, 37642), True, 'import cntk as C\n'), ((42941, 42959), 'cntk.sequence.last', 'C.sequence.last', (['s'], {}), '(s)\n', (42956, 42959), True, 'import cntk as C\n'), ((43957, 43968), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (43965, 43968), True, 'import cntk as C\n'), ((52185, 52256), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (52194, 52256), True, 'import cntk as C\n'), ((53073, 53144), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (53082, 53144), True, 'import cntk as C\n'), ((54195, 54203), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (54200, 54203), True, 'import cntk as C\n'), ((55923, 55959), 'cntk.combine', 'C.combine', (['[u.output for u in u_ops]'], {}), '([u.output for u in u_ops])\n', (55932, 55959), True, 'import cntk as C\n'), ((57442, 57473), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57451, 57473), True, 'import cntk as C\n'), ((63010, 63045), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[padding, (0, 0)]'}), '(x, pattern=[padding, (0, 0)])\n', (63015, 63045), True, 'import cntk as C\n'), ((63192, 63235), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[(0, 0), padding, (0, 0)]'}), '(x, pattern=[(0, 0), padding, (0, 0)])\n', (63197, 63235), True, 'import cntk as C\n'), ((63857, 63896), 'cntk.constant', 'C.constant', ([], {'value': '(0)', 'shape': 'prefix_shape'}), '(value=0, shape=prefix_shape)\n', (63867, 63896), True, 'import cntk as C\n'), ((64115, 64155), 'cntk.constant', 'C.constant', ([], 
{'value': '(0)', 'shape': 'postfix_shape'}), '(value=0, shape=postfix_shape)\n', (64125, 64155), True, 'import cntk as C\n'), ((69046, 69063), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (69056, 69063), True, 'import numpy as np\n'), ((70353, 70368), 'cntk.greater', 'C.greater', (['x', '(0)'], {}), '(x, 0)\n', (70362, 70368), True, 'import cntk as C\n'), ((73799, 73828), 'cntk.Axis.default_dynamic_axis', 'C.Axis.default_dynamic_axis', ([], {}), '()\n', (73826, 73828), True, 'import cntk as C\n'), ((78362, 78472), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default."""'], {}), "(\n 'CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default.'\n )\n", (78375, 78472), False, 'import warnings\n'), ((78893, 78965), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.target_shape, self.inputs[0].dtype, [batch_axis])\n', (78910, 78965), True, 'import cntk as C\n'), ((79207, 79236), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79217, 79236), True, 'import numpy as np\n'), ((79394, 79417), 'cntk.cntk_py.Value', 'C.cntk_py.Value', (['result'], {}), '(result)\n', (79409, 79417), True, 'import cntk as C\n'), ((79640, 79667), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79650, 79667), True, 'import numpy as np\n'), ((80395, 80474), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape[1:]', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])\n', (80412, 80474), True, 'import cntk as C\n'), ((81357, 81419), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[]'], {}), '(self.target_shape, self.inputs[0].dtype, [])\n', (81374, 81419), True, 'import cntk as C\n'), ((82085, 82180), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape', 'self.inputs[0].dtype', 'self.inputs[0].dynamic_axes'], {}), '(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0\n ].dynamic_axes)\n', (82102, 82180), True, 'import cntk as C\n'), ((31418, 31563), 'warnings.warn', 'warnings.warn', (['"""Warning: CNTK backend does not support collapse of batch axis with inferred dimension. The reshape did not take place."""'], {}), "(\n 'Warning: CNTK backend does not support collapse of batch axis with inferred dimension. 
The reshape did not take place.'\n )\n", (31431, 31563), False, 'import warnings\n'), ((36834, 36872), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (36845, 36872), True, 'import cntk as C\n'), ((37117, 37170), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (37137, 37170), True, 'import cntk as C\n'), ((37999, 38037), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (38010, 38037), True, 'import cntk as C\n'), ((38274, 38327), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (38294, 38327), True, 'import cntk as C\n'), ((41945, 41987), 'cntk.placeholder', 'C.placeholder', ([], {'dynamic_axes': 'x.dynamic_axes'}), '(dynamic_axes=x.dynamic_axes)\n', (41958, 41987), True, 'import cntk as C\n'), ((56690, 56746), 'cntk.cntk_py.universal_learner', 'C.cntk_py.universal_learner', (['p_list', 'u_list', 'update_func'], {}), '(p_list, u_list, update_func)\n', (56717, 56746), True, 'import cntk as C\n'), ((56950, 57001), 'cntk.trainer.Trainer', 'C.trainer.Trainer', (['outputs[0]', 'criterion', '[learner]'], {}), '(outputs[0], criterion, [learner])\n', (56967, 57001), True, 'import cntk as C\n'), ((57264, 57312), 'cntk.combine', 'C.combine', (['[_.output for _ in unrelated_updates]'], {}), '([_.output for _ in unrelated_updates])\n', (57273, 57312), True, 'import cntk as C\n'), ((57719, 57750), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57728, 57750), True, 'import cntk as C\n'), ((58902, 58919), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (58912, 58919), True, 'import numpy as np\n'), ((79141, 79168), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79151, 79168), True, 'import numpy as np\n'), ((79572, 79601), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79582, 79601), True, 'import numpy as np\n'), ((11650, 11680), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', 'size'], {}), '(1, p, size)\n', (11668, 11680), True, 'import numpy as np\n'), ((28425, 28455), 'cntk.reduce_mean', 'C.reduce_mean', (['gamma', '(axis - 1)'], {}), '(gamma, axis - 1)\n', (28438, 28455), True, 'import cntk as C\n'), ((28483, 28512), 'cntk.reduce_mean', 'C.reduce_mean', (['beta', '(axis - 1)'], {}), '(beta, axis - 1)\n', (28496, 28512), True, 'import cntk as C\n'), ((30621, 30632), 'cntk.sqrt', 'C.sqrt', (['var'], {}), '(var)\n', (30627, 30632), True, 'import cntk as C\n'), ((40197, 40210), 'cntk.to_batch', 'C.to_batch', (['s'], {}), '(s)\n', (40207, 40210), True, 'import cntk as C\n'), ((42121, 42148), 'cntk.sequence.past_value', 'C.sequence.past_value', (['p', 's'], {}), '(p, s)\n', (42142, 42148), True, 'import cntk as C\n'), ((42479, 42504), 'cntk.element_select', 'C.element_select', (['m', 'n', 's'], {}), '(m, n, s)\n', (42495, 42504), True, 'import cntk as C\n'), ((43463, 43482), 'cntk.unpack_batch', 'C.unpack_batch', (['l_s'], {}), '(l_s)\n', (43477, 43482), True, 'import cntk as C\n'), ((54743, 54756), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (54748, 54756), True, 'import cntk as C\n'), ((74098, 74125), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (74123, 74125), True, 'import cntk as C\n'), ((37328, 37368), 
'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (37348, 37368), True, 'import cntk as C\n'), ((38485, 38525), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (38505, 38525), True, 'import cntk as C\n'), ((41365, 41410), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['constant', 'rnn_inputs'], {}), '(constant, rnn_inputs)\n', (41388, 41410), True, 'import cntk as C\n'), ((55663, 55693), 'cntk.assign', 'C.assign', (['update[0]', 'update[1]'], {}), '(update[0], update[1])\n', (55671, 55693), True, 'import cntk as C\n'), ((41099, 41137), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['c', 'rnn_inputs'], {}), '(c, rnn_inputs)\n', (41122, 41137), True, 'import cntk as C\n')] |
nVoid/Yale-TouchDesigner-April2016 | Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py | 40eb36f515fa3935f3e9ddaa923664e88308262c | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import log
from .base import BaseEndpoint
from .. import errors
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
v, r = provider.validate_protected_resource_request(
request.url,
http_method=request.method,
body=request.data,
headers=request.headers,
realms=realms or [])
if v:
return f(*args, **kwargs)
else:
return abort(403)
"""
def validate_protected_resource_request(self, uri, http_method='GET',
                                            body=None, headers=None, realms=None):
        """Validate a protected resource request.
        :param uri: The full URI of the protected resource request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
| [((6638, 6688), 'oauthlib.common.log.info', 'log.info', (['"""[Failure] request verification failed."""'], {}), "('[Failure] request verification failed.')\n", (6646, 6688), False, 'from oauthlib.common import log\n'), ((6701, 6743), 'oauthlib.common.log.info', 'log.info', (['"""Valid client: %s"""', 'valid_client'], {}), "('Valid client: %s', valid_client)\n", (6709, 6743), False, 'from oauthlib.common import log\n'), ((6756, 6805), 'oauthlib.common.log.info', 'log.info', (['"""Valid token: %s"""', 'valid_resource_owner'], {}), "('Valid token: %s', valid_resource_owner)\n", (6764, 6805), False, 'from oauthlib.common import log\n'), ((6818, 6858), 'oauthlib.common.log.info', 'log.info', (['"""Valid realm: %s"""', 'valid_realm'], {}), "('Valid realm: %s', valid_realm)\n", (6826, 6858), False, 'from oauthlib.common import log\n'), ((6871, 6919), 'oauthlib.common.log.info', 'log.info', (['"""Valid signature: %s"""', 'valid_signature'], {}), "('Valid signature: %s', valid_signature)\n", (6879, 6919), False, 'from oauthlib.common import log\n')] |
AymenSe/Geometric-operations-DIP | python/ex_1.py | ef0b0bc86210a8da5e63136bf5a239179b869722 | ####################################################
#
# @ Authors : SEKHRI Aymen
# MOHAMMED HACENE Tarek
#
# @ Hint: you have to install all requirements
# from requirements.txt
#
####################################################
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# load the image
onion_img = cv.imread("onion.png")
# Store height and width and channels of the image
row, col, chs = onion_img.shape
# Store the pixel data type of the image
dtype_img = onion_img.dtype # This will give you: uint8
def translation(img, trans):
"""
args:
- img: absolute path to the image
- trans: must be a tuple (row_trans, col_trans)
"""
# read the image
image = cv.imread(img)
# retrieve the height and the width
height, width = image.shape[:2]
# retrieve the params of translation
row_trans, col_trans = trans
# Create the translation matrix
T = np.float32([[1, 0, col_trans], [0, 1, row_trans]])
# Apply the T matrix: T*M
img_translation = cv.warpAffine(image, T, (width, height))
# show the images
cv.imshow("Original Image", image)
cv.imshow('Translation Image', img_translation)
    # Keep the image windows open until the user presses a key
cv.waitKey()
cv.destroyAllWindows()
# translate 20 pixels to the right
translation("onion.png", (0, 20))
# translate 50 rows down and 100 columns to the right
translation("onion.png", (50, 100))
# remove the pepper from the image using translations
translation("onion.png", (40, 40))
| [((368, 390), 'cv2.imread', 'cv.imread', (['"""onion.png"""'], {}), "('onion.png')\n", (377, 390), True, 'import cv2 as cv\n'), ((726, 740), 'cv2.imread', 'cv.imread', (['img'], {}), '(img)\n', (735, 740), True, 'import cv2 as cv\n'), ((917, 967), 'numpy.float32', 'np.float32', (['[[1, 0, col_trans], [0, 1, row_trans]]'], {}), '([[1, 0, col_trans], [0, 1, row_trans]])\n', (927, 967), True, 'import numpy as np\n'), ((1015, 1055), 'cv2.warpAffine', 'cv.warpAffine', (['image', 'T', '(width, height)'], {}), '(image, T, (width, height))\n', (1028, 1055), True, 'import cv2 as cv\n'), ((1076, 1110), 'cv2.imshow', 'cv.imshow', (['"""Original Image"""', 'image'], {}), "('Original Image', image)\n", (1085, 1110), True, 'import cv2 as cv\n'), ((1112, 1159), 'cv2.imshow', 'cv.imshow', (['"""Translation Image"""', 'img_translation'], {}), "('Translation Image', img_translation)\n", (1121, 1159), True, 'import cv2 as cv\n'), ((1208, 1220), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (1218, 1220), True, 'import cv2 as cv\n'), ((1222, 1244), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1242, 1244), True, 'import cv2 as cv\n')] |
h-zcc/ref-nms | utils/hit_rate_utils.py | 8f83f350c497d0ef875c778a8ce76725552abb3c | from utils.misc import calculate_iou, xywh_to_xyxy
__all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator']
class NewHitRateEvaluator:
def __init__(self, refer, top_N=None, threshold=0.5):
"""Evaluate refexp-based hit rate.
Args:
refdb: `refdb` dict.
split: Dataset split to evaluate on.
top_N: Select top-N scoring proposals to evaluate. `None` means no selection. Default `None`.
"""
self.refer = refer
self.top_N = top_N
self.threshold = threshold
def eval_hit_rate(self, split, proposal_dict, image_as_key=False):
"""Evaluate refexp-based hit rate.
        Args:
            split: Dataset split to evaluate on.
            proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}.
image_as_key: Use image_id instead of exp_id as key, default `False`.
Returns:
proposal_per_ref: Number of proposals per refexp.
hit_rate: Refexp-based hit rate of proposals.
"""
# Initialize counters
num_hit = 0
num_proposal = 0
num_ref = 0 # NOTE: this is the number of refexp, not ref
for ref_id in self.refer.getRefIds(split=split):
ref = self.refer.Refs[ref_id]
image_id = ref['image_id']
ann_id = ref['ann_id']
ann = self.refer.Anns[ann_id]
gt_box = xywh_to_xyxy(ann['bbox'])
for exp_id in ref['sent_ids']:
# Get proposals
if image_as_key:
proposals = proposal_dict[image_id]
else:
proposals = proposal_dict[exp_id]
# Rank and select proposals
ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N]
for proposal in ranked_proposals:
if calculate_iou(gt_box, proposal['box']) > self.threshold:
num_hit += 1
break
num_proposal += len(ranked_proposals)
num_ref += 1
proposal_per_ref = num_proposal / num_ref
hit_rate = num_hit / num_ref
return proposal_per_ref, hit_rate
class CtxHitRateEvaluator:
def __init__(self, refer, ctxdb, top_N=None, threshold=0.5):
self.refer = refer
self.ctxdb = ctxdb
self.top_N = top_N
self.threshold = threshold
def eval_hit_rate(self, split, proposal_dict, image_as_key=False):
"""Evaluate refexp-based hit rate.
        Args:
            split: Dataset split to evaluate on.
            proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}.
image_as_key: Use image_id instead of exp_id as key, default `False`.
Returns:
proposal_per_ref: Number of proposals per refexp.
hit_rate: Refexp-based hit rate of proposals.
"""
# Initialize counters
recall_list = []
avg_num_list = []
for exp_id, ctx in self.ctxdb[split].items():
exp_id = int(exp_id)
if len(ctx['ctx']) == 0:
continue
# Get proposals
if image_as_key:
image_id = self.refer.sentToRef[exp_id]['image_id']
proposals = proposal_dict[image_id]
else:
proposals = proposal_dict[exp_id]
# Rank and select proposals
ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N]
hit_num, ctx_num = 0, 0
for ctx_item in ctx['ctx']:
ctx_num += 1
ctx_box = ctx_item['box']
for proposal in ranked_proposals:
if calculate_iou(ctx_box, proposal['box']) > self.threshold:
hit_num += 1
break
recall_list.append(hit_num / ctx_num)
avg_num_list.append(len(ranked_proposals))
return sum(avg_num_list) / len(avg_num_list), sum(recall_list) / len(recall_list)
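# Minimal usage sketch (assumption: `refer` is a loaded REFER instance and
# `proposal_dict` maps exp_id -> [{'box': [x1, y1, x2, y2], 'score': float}];
# neither is constructed in this module).
def _example_eval_hit_rate(refer, proposal_dict):
    evaluator = NewHitRateEvaluator(refer, top_N=10, threshold=0.5)
    proposal_per_ref, hit_rate = evaluator.eval_hit_rate('val', proposal_dict)
    print('proposals/ref: %.2f  hit rate: %.4f' % (proposal_per_ref, hit_rate))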
| [((1365, 1390), 'utils.misc.xywh_to_xyxy', 'xywh_to_xyxy', (["ann['bbox']"], {}), "(ann['bbox'])\n", (1377, 1390), False, 'from utils.misc import calculate_iou, xywh_to_xyxy\n'), ((1854, 1892), 'utils.misc.calculate_iou', 'calculate_iou', (['gt_box', "proposal['box']"], {}), "(gt_box, proposal['box'])\n", (1867, 1892), False, 'from utils.misc import calculate_iou, xywh_to_xyxy\n'), ((3679, 3718), 'utils.misc.calculate_iou', 'calculate_iou', (['ctx_box', "proposal['box']"], {}), "(ctx_box, proposal['box'])\n", (3692, 3718), False, 'from utils.misc import calculate_iou, xywh_to_xyxy\n')] |
amukher3/Problem_solutions | LeetCode_ReorderDataLogFiles.py | 8fa6014a91f295d08cafb989024caa91d99211d9 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 19:07:30 2020
@author: Abhishek Mukherjee
"""
from typing import List
class Solution:
def reorderLogFiles(self, logs: List[str]) -> List[str]:
letLog=[]
digLog=[]
for i in range(len(logs)):
temp=[]
temp=logs[i].split(' ')
if temp[1].isdigit() is True:
digLog.append(logs[i])
else:
letLog.append(logs[i])
tempLetLog=[]
for i in letLog:
tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]]))
tempLetLog=sorted(tempLetLog)
letLog=[]
for i in tempLetLog:
tempPrime=i.split(' ')[:-1]
temp=i.split(' ')[-1]
letLog.append(' '.join([temp]+tempPrime))
return letLog+digLog | [] |
fairhopeweb/saleor | saleor/core/transactions.py | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | from contextlib import contextmanager
from django.db import DatabaseError
from ..core.tracing import traced_atomic_transaction
@contextmanager
def transaction_with_commit_on_errors():
"""Perform transaction and raise an error in any occurred."""
error = None
with traced_atomic_transaction():
try:
yield
except DatabaseError:
raise
except Exception as e:
error = e
if error:
raise error
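# Illustrative sketch of the commit-on-error behaviour (assumption: `order` is a
# placeholder model instance, not something defined in this module): the ValueError
# below surfaces to the caller only after the atomic block has exited, so the
# status change is committed rather than rolled back.
def _commit_on_errors_example(order):
    with transaction_with_commit_on_errors():
        order.status = "charged"
        order.save()
        raise ValueError("raised after the transaction commits")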
| [] |
diberry/azure-cli | src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | 302999245cbb13b890b0a74f03443c577bd4bfae | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, record_only
@record_only()
class PolicyInsightsTests(ScenarioTest):
def test_policy_insights(self):
top_clause = '--top 2'
filter_clause = '--filter "isCompliant eq false"'
apply_clause = '--apply "groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))"'
select_clause = '--select "policyAssignmentId, resourceId, numRecords"'
order_by_clause = '--order-by "numRecords desc"'
from_clause = '--from "2018-04-04T00:00:00"'
to_clause = '--to "2018-05-22T00:00:00"'
scopes = [
'-m "azgovtest4"',
'',
'-g "defaultresourcegroup-eus"',
'--resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba"',
'--resource "omssecuritydevkeyvalut" --namespace "microsoft.keyvault" --resource-type "vaults" -g "omssecurityintresourcegroup"',
'--resource "default" --namespace "microsoft.network" --resource-type "subnets" --parent "virtualnetworks/mms-wcus-vnet" -g "mms-wcus"',
'-s "335cefd2-ab16-430f-b364-974a170eb1d5"',
'-d "25bf1e2a-6004-47ad-9bd1-2a40dd6de016"',
'-a "96e22f7846e94bb186ae3a01"',
'-a "bc916e4f3ab54030822a11b3" -g "tipkeyvaultresourcegroup" '
]
for scope in scopes:
events = self.cmd('az policy event list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(events) >= 0
states = self.cmd('az policy state list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(states) >= 0
summary = self.cmd('az policy state summarize {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
top_clause)).get_output_in_json()
assert summary["results"] is not None
assert len(summary["policyAssignments"]) >= 0
if len(summary["policyAssignments"]) > 0:
assert summary["policyAssignments"][0]["results"] is not None
assert len(summary["policyAssignments"][0]["policyDefinitions"]) >= 0
if len(summary["policyAssignments"][0]["policyDefinitions"]) > 0:
assert summary["policyAssignments"][0]["policyDefinitions"][0]["results"] is not None
| [((405, 418), 'azure.cli.testsdk.record_only', 'record_only', ([], {}), '()\n', (416, 418), False, 'from azure.cli.testsdk import ScenarioTest, record_only\n')] |
Aslic/rmats_turbo_4.1.0 | tests/prep_post/test.py | c651509a5d32799315054fa37a2210fab2aae5e5 | import glob
import os.path
import subprocess
import sys
import unittest
import tests.bam
import tests.base_test
import tests.gtf
import tests.output_parser as output_parser
import tests.test_config
import tests.util
class Test(tests.base_test.BaseTest):
def setUp(self):
super().setUp()
self._test_base_dir = tests.test_config.TEST_BASE_DIR
self._test_dir = os.path.join(self._test_base_dir, 'prep_post')
self._generated_input_dir = os.path.join(self._test_dir,
'generated_input')
self._out_dir = os.path.join(self._test_dir, 'out')
self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1')
self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2')
self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post')
self._dup_input_bam_tmp_dir = os.path.join(self._test_dir,
'tmp_dup_input_bam')
self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir,
'tmp_dup_prep_bam')
self._miss_input_bam_tmp_dir = os.path.join(self._test_dir,
'tmp_miss_input_bam')
self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir,
'tmp_miss_prep_bam')
tests.util.recreate_dirs([
self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir,
self._prep_2_tmp_dir, self._post_tmp_dir,
self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir,
self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir,
self._command_output_dir()
])
self._read_type = 'paired'
self._read_length = 50
self._sample_1_bams_path = os.path.join(self._generated_input_dir,
'b1.txt')
self._sample_2_bams_path = os.path.join(self._generated_input_dir,
'b2.txt')
sample_1_bam_replicate_template = os.path.join(
self._generated_input_dir, 'sample_1_rep_{}.bam')
sample_2_bam_replicate_template = os.path.join(
self._generated_input_dir, 'sample_2_rep_{}.bam')
self._sample_1_bams = self._create_sample_1_bams(
self._sample_1_bams_path, sample_1_bam_replicate_template)
self._sample_2_bams = self._create_sample_2_bams(
self._sample_2_bams_path, sample_2_bam_replicate_template)
self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf')
self._gtf = self._create_gtf(self._gtf_path)
self._sub_steps = [
'prep_1',
'inte_1_fail',
'inte_1_pass',
'prep_2',
'inte_2_fail',
'inte_2_pass',
'post',
'duplicate_input_bam',
'duplicate_prep_bam',
'missing_input_bam',
'missing_prep_bam',
]
self._sub_step = None
def test(self):
for sub_step in self._sub_steps:
self._sub_step = sub_step
self._setup_sub_step()
self._run_test()
def _command_output_dir(self):
return os.path.join(self._test_dir, 'command_output')
def _rmats_arguments(self):
arguments = [
'--gtf',
self._gtf_path,
'--od',
self._out_dir,
'-t',
self._read_type,
'--readLength',
str(self._read_length),
]
if self._sub_step == 'prep_1':
arguments.extend([
'--tmp',
self._prep_1_tmp_dir,
'--b1',
self._sample_1_bams_path,
'--task',
'prep',
])
elif self._sub_step == 'inte_1_fail':
arguments.extend([
'--tmp',
self._post_tmp_dir,
'--b1',
self._sample_1_bams_path,
'--b2',
self._sample_2_bams_path,
'--task',
'inte',
])
elif self._sub_step == 'inte_1_pass':
arguments.extend([
'--tmp',
self._post_tmp_dir,
'--b1',
self._sample_1_bams_path,
'--task',
'inte',
'--statoff',
])
elif self._sub_step == 'prep_2':
arguments.extend([
'--tmp',
self._prep_2_tmp_dir,
'--b1',
self._sample_2_bams_path,
'--task',
'prep',
])
elif self._sub_step == 'inte_2_fail':
arguments.extend([
'--tmp',
self._post_tmp_dir,
'--b1',
self._sample_2_bams_path,
'--task',
'inte',
'--statoff',
])
elif self._sub_step == 'inte_2_pass':
arguments.extend([
'--tmp',
self._post_tmp_dir,
'--b1',
self._sample_1_bams_path,
'--b2',
self._sample_2_bams_path,
'--task',
'inte',
])
elif self._sub_step == 'post':
arguments.extend([
'--tmp',
self._post_tmp_dir,
'--b1',
self._sample_1_bams_path,
'--b2',
self._sample_2_bams_path,
'--task',
'post',
])
elif self._sub_step == 'duplicate_input_bam':
arguments.extend([
'--tmp',
self._dup_input_bam_tmp_dir,
'--b1',
self._dup_input_bam_path,
'--task',
'post',
'--statoff',
])
elif self._sub_step == 'duplicate_prep_bam':
arguments.extend([
'--tmp',
self._dup_prep_bam_tmp_dir,
'--b1',
self._dup_prep_bam_path,
'--task',
'post',
'--statoff',
])
elif self._sub_step == 'missing_input_bam':
arguments.extend([
'--tmp',
self._miss_input_bam_tmp_dir,
'--b1',
self._miss_input_bam_path,
'--task',
'post',
'--statoff',
])
elif self._sub_step == 'missing_prep_bam':
arguments.extend([
'--tmp',
self._miss_prep_bam_tmp_dir,
'--b1',
self._miss_prep_bam_path,
'--task',
'post',
'--statoff',
])
return arguments
def _setup_sub_step(self):
if self._sub_step == 'duplicate_input_bam':
self._setup_dup_input_bam()
elif self._sub_step == 'duplicate_prep_bam':
self._setup_dup_prep_bam()
elif self._sub_step == 'missing_input_bam':
self._setup_miss_input_bam()
elif self._sub_step == 'missing_prep_bam':
self._setup_miss_prep_bam()
def _setup_dup_input_bam(self):
self._dup_input_bam_path = os.path.join(self._generated_input_dir,
'dup_input.txt')
bams = self._sample_1_bams + [self._sample_1_bams[0]]
self._write_bams(bams, self._dup_input_bam_path)
self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
self._dup_input_bam_tmp_dir)
def _setup_dup_prep_bam(self):
self._dup_prep_bam_path = os.path.join(self._generated_input_dir,
'dup_prep.txt')
bams = self._sample_1_bams
self._write_bams(bams, self._dup_prep_bam_path)
self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
self._dup_prep_bam_tmp_dir)
self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir,
self._dup_prep_bam_tmp_dir)
def _setup_miss_input_bam(self):
self._miss_input_bam_path = os.path.join(self._generated_input_dir,
'miss_input.txt')
bams = [self._sample_1_bams[0]]
self._write_bams(bams, self._miss_input_bam_path)
self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
self._miss_input_bam_tmp_dir)
def _setup_miss_prep_bam(self):
self._miss_prep_bam_path = os.path.join(self._generated_input_dir,
'miss_prep.txt')
bams = self._sample_1_bams + self._sample_2_bams
self._write_bams(bams, self._miss_prep_bam_path)
self._cp_with_prefix('prep_1', self._prep_1_tmp_dir,
self._miss_prep_bam_tmp_dir)
def _create_gtf(self, gtf_path):
gtf = tests.gtf.GTF()
gtf.path = gtf_path
transcript_1 = tests.gtf.Transcript()
transcript_1.chromosome = '1'
transcript_1.strand = '+'
transcript_1.gene_id = tests.util.gene_id_str(1)
transcript_1.gene_name = tests.util.gene_name_str(1)
transcript_1.transcript_id = tests.util.transcript_id_str(1)
transcript_1.exons = [(1, 100), (201, 300), (401, 500)]
gtf.transcripts = [transcript_1]
error = gtf.write()
self.assertFalse(error)
return gtf
def _create_sample_1_bams(self, sample_1_bams_path,
sample_1_replicate_template):
rep_1_bam = tests.bam.BAM()
rep_1_bam.path = sample_1_replicate_template.format(1)
rep_2_bam = tests.bam.BAM()
rep_2_bam.path = sample_1_replicate_template.format(2)
sample_1_bams = [rep_1_bam, rep_2_bam]
rep_1_read_1 = tests.bam.Read()
rep_1_read_1.ref_seq_name = '1' # chromosome
rep_1_read_1.ref_seq_len = 1000 # chromosome length
rep_1_read_1.template_name = tests.util.template_name_str([1, 1])
rep_1_read_2 = tests.bam.Read()
error = tests.bam.set_read_pair_from_intervals(rep_1_read_1,
rep_1_read_2,
[[76, 100], [201, 300]],
[[401, 475]],
self._read_length)
self.assertFalse(error)
rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]
rep_2_read_1 = tests.bam.Read()
rep_2_read_1.ref_seq_name = '1' # chromosome
rep_2_read_1.ref_seq_len = 1000 # chromosome length
rep_2_read_1.template_name = tests.util.template_name_str([1, 2])
rep_2_read_2 = tests.bam.Read()
error = tests.bam.set_read_pair_from_intervals(
rep_2_read_1, rep_2_read_2, [[26, 100]], [[201, 300], [401, 425]],
self._read_length)
self.assertFalse(error)
rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]
self._write_bams(sample_1_bams, sample_1_bams_path)
return sample_1_bams
def _create_sample_2_bams(self, sample_2_bams_path,
sample_2_replicate_template):
rep_1_bam = tests.bam.BAM()
rep_1_bam.path = sample_2_replicate_template.format(1)
rep_2_bam = tests.bam.BAM()
rep_2_bam.path = sample_2_replicate_template.format(2)
sample_2_bams = [rep_1_bam, rep_2_bam]
rep_1_read_1 = tests.bam.Read()
rep_1_read_1.ref_seq_name = '1' # chromosome
rep_1_read_1.ref_seq_len = 1000 # chromosome length
rep_1_read_1.template_name = tests.util.template_name_str([2, 1])
rep_1_read_2 = tests.bam.Read()
error = tests.bam.set_read_pair_from_intervals(rep_1_read_1,
rep_1_read_2,
[[76, 100], [401, 500]],
[[401, 475]],
self._read_length)
self.assertFalse(error)
rep_1_bam.reads = [rep_1_read_1, rep_1_read_2]
rep_2_read_1 = tests.bam.Read()
rep_2_read_1.ref_seq_name = '1' # chromosome
rep_2_read_1.ref_seq_len = 1000 # chromosome length
rep_2_read_1.template_name = tests.util.template_name_str([2, 2])
rep_2_read_2 = tests.bam.Read()
error = tests.bam.set_read_pair_from_intervals(rep_2_read_1,
rep_2_read_2,
[[26, 100]],
[[1, 100], [401, 425]],
self._read_length)
self.assertFalse(error)
rep_2_bam.reads = [rep_2_read_1, rep_2_read_2]
self._write_bams(sample_2_bams, sample_2_bams_path)
return sample_2_bams
def _cp_with_prefix(self, prefix, source_dir, dest_dir):
source_paths = self._get_dot_rmats_paths(source_dir)
command = [
sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir
]
command.extend(source_paths)
subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
def _check_results(self):
if self._sub_step == 'prep_1':
self._check_results_prep_1()
elif self._sub_step == 'inte_1_fail':
self._check_results_inte_1_fail()
elif self._sub_step == 'inte_1_pass':
self._check_results_inte_1_pass()
elif self._sub_step == 'prep_2':
self._check_results_prep_2()
elif self._sub_step == 'inte_2_fail':
self._check_results_inte_2_fail()
elif self._sub_step == 'inte_2_pass':
self._check_results_inte_2_pass()
elif self._sub_step == 'post':
self._check_results_post()
elif self._sub_step == 'duplicate_input_bam':
self._check_results_dup_input_bam()
elif self._sub_step == 'duplicate_prep_bam':
self._check_results_dup_prep_bam()
elif self._sub_step == 'missing_input_bam':
self._check_results_miss_input_bam()
elif self._sub_step == 'missing_prep_bam':
self._check_results_miss_prep_bam()
else:
self.fail('unexpected sub_step: {}'.format(self._sub_step))
def _get_dot_rmats_paths(self, tmp_dir):
dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats'))
# filenames begin with a timestamp used for alphanumeric sort
return sorted(dot_rmats_file_paths)
def _check_results_prep_1(self):
self._check_no_error_results()
command_stdout_file_name = self._get_stdout_file_name()
with open(command_stdout_file_name, 'rt') as out_f_h:
out_lines = out_f_h.readlines()
tests.util.assert_no_line_has(self, out_lines,
'Processing count files')
test_gene_id = tests.util.gene_id_str(1)
quoted_test_gene_id = tests.util.double_quote(test_gene_id)
dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir)
self.assertEqual(len(dot_rmats_paths), 2)
for dot_rmats_i in range(2):
dot_rmats_contents, error = output_parser.parse_dot_rmats(
dot_rmats_paths[dot_rmats_i])
self.assertFalse(error)
self.assertEqual(dot_rmats_contents['bams'],
[self._sample_1_bams[dot_rmats_i].path])
self.assertEqual(dot_rmats_contents['read_length'],
self._read_length)
novel_juncs = dot_rmats_contents['novel_juncs']
self.assertEqual(novel_juncs, [dict()])
exons = dot_rmats_contents['exons']
if dot_rmats_i == 0:
self.assertEqual(exons, [{
quoted_test_gene_id: [{
'start_box': [401, 499],
'end_box': [401, 499],
'counts': [1, 0]
}]
}])
else:
self.assertEqual(exons, [{
quoted_test_gene_id: [{
'start_box': [1, 99],
'end_box': [1, 99],
'counts': [1, 0]
}]
}])
multis = dot_rmats_contents['multis']
if dot_rmats_i == 0:
self.assertEqual(multis, [{
quoted_test_gene_id: [{
'junction_pairs': [[1, 1], [100, 200], [299, 299]],
'count':
1
}]
}])
else:
self.assertEqual(multis, [{
quoted_test_gene_id: [{
'junction_pairs': [[201, 201], [300, 400], [499, 499]],
'count':
1
}]
}])
self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir,
self._post_tmp_dir)
def _check_results_prep_2(self):
self._check_no_error_results()
command_stdout_file_name = self._get_stdout_file_name()
with open(command_stdout_file_name, 'rt') as out_f_h:
out_lines = out_f_h.readlines()
tests.util.assert_no_line_has(self, out_lines,
'Processing count files')
test_gene_id = tests.util.gene_id_str(1)
quoted_test_gene_id = tests.util.double_quote(test_gene_id)
dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir)
self.assertEqual(len(dot_rmats_paths), 2)
for dot_rmats_i in range(2):
dot_rmats_contents, error = output_parser.parse_dot_rmats(
dot_rmats_paths[dot_rmats_i])
self.assertFalse(error)
self.assertEqual(dot_rmats_contents['bams'],
[self._sample_2_bams[dot_rmats_i].path])
self.assertEqual(dot_rmats_contents['read_length'],
self._read_length)
novel_juncs = dot_rmats_contents['novel_juncs']
self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}])
exons = dot_rmats_contents['exons']
if dot_rmats_i == 0:
self.assertEqual(exons, [{
quoted_test_gene_id: [{
'start_box': [401, 499],
'end_box': [401, 499],
'counts': [1, 0]
}]
}])
else:
self.assertEqual(exons, [{
quoted_test_gene_id: [{
'start_box': [1, 99],
'end_box': [1, 99],
'counts': [1, 0]
}]
}])
multis = dot_rmats_contents['multis']
if dot_rmats_i == 0:
self.assertEqual(multis, [{
quoted_test_gene_id: [{
'junction_pairs': [[1, 1], [100, 400], [499, 499]],
'count':
1
}]
}])
else:
self.assertEqual(multis, [{
quoted_test_gene_id: [{
'junction_pairs': [[1, 1], [100, 400], [499, 499]],
'count':
1
}]
}])
self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir,
self._post_tmp_dir)
def _check_results_inte_1_fail(self):
self.assertNotEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
tests.util.assert_some_line_has(
self, err_lines, 'input bam files with no associated prep output')
def _check_results_inte_1_pass(self):
self._check_no_error_results()
def _check_results_inte_2_fail(self):
self.assertNotEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
tests.util.assert_some_line_has(
self, err_lines,
'bam files not in input but associated with prep output')
def _check_results_inte_2_pass(self):
self._check_no_error_results()
def _check_results_post(self):
self._check_no_error_results()
command_stdout_file_name = self._get_stdout_file_name()
with open(command_stdout_file_name, 'rt') as out_f_h:
out_lines = out_f_h.readlines()
tests.util.assert_some_line_has(self, out_lines,
'Processing count files')
from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt')
from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf(
from_gtf_se_path)
self.assertFalse(error)
self.assertEqual(len(from_gtf_se_rows), 1)
from_gtf_se_row = from_gtf_se_rows[0]
self.assertEqual(from_gtf_se_row['GeneID'],
tests.util.double_quote(tests.util.gene_id_str(1)))
self.assertEqual(from_gtf_se_row['exonStart_0base'], '200')
self.assertEqual(from_gtf_se_row['exonEnd'], '300')
jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt')
jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw(
jc_raw_se_path)
self.assertFalse(error)
self.assertEqual(len(jc_raw_se_rows), 1)
jc_raw_se_row = jc_raw_se_rows[0]
self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID'])
self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1')
self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0')
self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0')
self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1')
se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt')
se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
se_mats_jc_path)
self.assertFalse(error)
self._check_se_mats_jc_header(se_mats_jc_header)
self.assertEqual(len(se_mats_jc_rows), 1)
se_mats_jc_row = se_mats_jc_rows[0]
pvalue = float(se_mats_jc_row['PValue'])
tests.util.assert_within_bounds(self, pvalue, 0, 1)
fdr = float(se_mats_jc_row['FDR'])
tests.util.assert_within_bounds(self, fdr, 0, 1)
inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',')
self.assertEqual(len(inc_level_1_splits), 2)
self.assertAlmostEqual(float(inc_level_1_splits[0]), 1)
self.assertAlmostEqual(float(inc_level_1_splits[1]), 1)
inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',')
self.assertEqual(len(inc_level_2_splits), 2)
self.assertAlmostEqual(float(inc_level_2_splits[0]), 0)
self.assertAlmostEqual(float(inc_level_2_splits[1]), 0)
self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1)
def _check_results_dup_input_bam(self):
self.assertNotEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
dup_bam_path = self._sample_1_bams[0].path
expected_error = '{} given 2 times'.format(dup_bam_path)
tests.util.assert_some_line_has(self, err_lines, expected_error)
def _check_results_dup_prep_bam(self):
self.assertNotEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
for bam in self._sample_1_bams:
dup_bam_path = bam.path
expected_error = '{} found 2 times in .rmats'.format(dup_bam_path)
tests.util.assert_some_line_has(self, err_lines, expected_error)
def _check_results_miss_input_bam(self):
self._check_no_error_results()
def _check_results_miss_prep_bam(self):
self.assertNotEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
for bam in self._sample_2_bams:
miss_bam_path = bam.path
expected_error = '{} not found in .rmats'.format(miss_bam_path)
tests.util.assert_some_line_has(self, err_lines, expected_error)
if __name__ == '__main__':
unittest.main(verbosity=2)
| [((25413, 25439), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (25426, 25439), False, 'import unittest\n'), ((13604, 13691), 'subprocess.run', 'subprocess.run', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'check': '(True)'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n check=True)\n', (13618, 13691), False, 'import subprocess\n'), ((21601, 21647), 'tests.output_parser.parse_from_gtf', 'output_parser.parse_from_gtf', (['from_gtf_se_path'], {}), '(from_gtf_se_path)\n', (21629, 21647), True, 'import tests.output_parser as output_parser\n'), ((22174, 22216), 'tests.output_parser.parse_jc_raw', 'output_parser.parse_jc_raw', (['jc_raw_se_path'], {}), '(jc_raw_se_path)\n', (22200, 22216), True, 'import tests.output_parser as output_parser\n'), ((22799, 22843), 'tests.output_parser.parse_mats_jc', 'output_parser.parse_mats_jc', (['se_mats_jc_path'], {}), '(se_mats_jc_path)\n', (22826, 22843), True, 'import tests.output_parser as output_parser\n'), ((15806, 15865), 'tests.output_parser.parse_dot_rmats', 'output_parser.parse_dot_rmats', (['dot_rmats_paths[dot_rmats_i]'], {}), '(dot_rmats_paths[dot_rmats_i])\n', (15835, 15865), True, 'import tests.output_parser as output_parser\n'), ((18337, 18396), 'tests.output_parser.parse_dot_rmats', 'output_parser.parse_dot_rmats', (['dot_rmats_paths[dot_rmats_i]'], {}), '(dot_rmats_paths[dot_rmats_i])\n', (18366, 18396), True, 'import tests.output_parser as output_parser\n')] |
kruskod/nltk | nltk/align/util.py | dba7b5431b1d57a75d50e048961c1a203b98c3da | # Natural Language Toolkit: Aligner Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Anna Garbar
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.align.api import Alignment
def pharaohtext2tuples(pharaoh_text):
"""
Converts pharaoh text format into an Alignment object (a list of tuples).
>>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5'
>>> pharaohtext2tuples(pharaoh_text)
Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])
:type pharaoh_text: str
:param pharaoh_text: the word alignment outputs in the pharaoh output format
:rtype: Alignment
:return: An Alignment object that contains a list of integer tuples
"""
    # Convert each hyphen-separated string pair into a tuple of integers.
list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()]
return Alignment(list_of_tuples)
def alignment2pharaohtext(alignment):
"""
Converts an Alignment object (a list of tuples) into pharaoh text format.
>>> alignment = [(0, 0), (2, 1), (9, 2), (21, 3), (10, 4), (7, 5)]
>>> alignment2pharaohtext(alignment)
'0-0 2-1 9-2 21-3 10-4 7-5'
:type alignment: Alignment
:param alignment: An Alignment object that contains a list of integer tuples
:rtype: str
:return: the word alignment outputs in the pharaoh output format
"""
pharaoh_text = ' '.join(str(i) + "-" + str(j) for i,j in alignment)
return pharaoh_text
| [((898, 923), 'nltk.align.api.Alignment', 'Alignment', (['list_of_tuples'], {}), '(list_of_tuples)\n', (907, 923), False, 'from nltk.align.api import Alignment\n')] |
khanhgithead/grr | grr/server/grr_response_server/databases/db_yara_test_lib.py | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""A module with test cases for the YARA database method."""
import os
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
class DatabaseTestYaraMixin(object):
"""A mixin class for testing YARA methods of database implementations."""
def testWriteYaraSignatureReferenceIncorrectUsername(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
with self.assertRaises(db.UnknownGRRUserError) as context:
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux")
self.assertEqual(context.exception.username, "quux")
def testWriteYaraSignatureReferenceDuplicated(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
# Writing duplicated signatures is possible, it should not raise.
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
def testVerifyYaraSignatureReferenceSimple(self):
self.db.WriteGRRUser("foo")
blob_id = rdf_objects.BlobID(os.urandom(32))
self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo")
self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id))
def testVerifyYaraSignatureReferenceIncorrect(self):
blob_id = rdf_objects.BlobID(os.urandom(32))
self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
| [((442, 456), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (452, 456), False, 'import os\n'), ((778, 792), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (788, 792), False, 'import os\n'), ((1130, 1144), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (1140, 1144), False, 'import os\n'), ((1376, 1390), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (1386, 1390), False, 'import os\n')] |
4aHxKzD/gpytorch | gpytorch/kernels/inducing_point_kernel.py | 7193545f88820ea04588b983f1d7ed603a59a27c | #!/usr/bin/env python3
import copy
import math
import torch
from ..distributions import MultivariateNormal
from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify
from ..mlls import InducingPointKernelAddedLossTerm
from ..models import exact_prediction_strategies
from ..utils.cholesky import psd_safe_cholesky
from .kernel import Kernel
class InducingPointKernel(Kernel):
def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):
super(InducingPointKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
self.likelihood = likelihood
if inducing_points.ndimension() == 1:
inducing_points = inducing_points.unsqueeze(-1)
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
self.register_added_loss_term("inducing_point_loss_term")
def _clear_cache(self):
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
@property
def _inducing_mat(self):
if not self.training and hasattr(self, "_cached_kernel_mat"):
return self._cached_kernel_mat
else:
res = delazify(self.base_kernel(self.inducing_points, self.inducing_points))
if not self.training:
self._cached_kernel_mat = res
return res
@property
def _inducing_inv_root(self):
if not self.training and hasattr(self, "_cached_kernel_inv_root"):
return self._cached_kernel_inv_root
else:
chol = psd_safe_cholesky(self._inducing_mat, upper=True)
eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype)
inv_root = torch.triangular_solve(eye, chol)[0]
res = inv_root
if not self.training:
self._cached_kernel_inv_root = res
return res
def _get_covariance(self, x1, x2):
k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))
if torch.equal(x1, x2):
covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root))
# Diagonal correction for predictive posterior
if not self.training:
correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf)
covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction))
else:
k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))
covar = MatmulLazyTensor(
k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)
)
return covar
def _covar_diag(self, inputs):
if inputs.ndimension() == 1:
inputs = inputs.unsqueeze(1)
# Get diagonal of covar
covar_diag = delazify(self.base_kernel(inputs, diag=True))
return DiagLazyTensor(covar_diag)
def forward(self, x1, x2, diag=False, **kwargs):
covar = self._get_covariance(x1, x2)
if self.training:
if not torch.equal(x1, x2):
raise RuntimeError("x1 should equal x2 in training mode")
zero_mean = torch.zeros_like(x1.select(-1, 0))
new_added_loss_term = InducingPointKernelAddedLossTerm(
MultivariateNormal(zero_mean, self._covar_diag(x1)),
MultivariateNormal(zero_mean, covar),
self.likelihood,
)
self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term)
if diag:
return covar.diag()
else:
return covar
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
def __deepcopy__(self, memo):
replace_inv_root = False
replace_kernel_mat = False
if hasattr(self, "_cached_kernel_inv_root"):
replace_inv_root = True
kernel_inv_root = self._cached_kernel_inv_root
if hasattr(self, "_cached_kernel_mat"):
replace_kernel_mat = True
kernel_mat = self._cached_kernel_mat
cp = self.__class__(
base_kernel=copy.deepcopy(self.base_kernel),
inducing_points=copy.deepcopy(self.inducing_points),
likelihood=self.likelihood,
active_dims=self.active_dims,
)
if replace_inv_root:
cp._cached_kernel_inv_root = kernel_inv_root
if replace_kernel_mat:
cp._cached_kernel_mat = kernel_mat
return cp
def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood):
# Allow for fast variances
return exact_prediction_strategies.SGPRPredictionStrategy(
train_inputs, train_prior_dist, train_labels, likelihood
)
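# Minimal construction sketch (illustration only; assumes gpytorch's public
# RBFKernel, ScaleKernel and GaussianLikelihood, which are not defined in this
# module, and uses a subset of the training inputs as initial inducing locations).
def _example_inducing_point_kernel():
    from gpytorch.kernels import RBFKernel, ScaleKernel
    from gpytorch.likelihoods import GaussianLikelihood
    train_x = torch.linspace(0, 1, 100).unsqueeze(-1)
    kernel = InducingPointKernel(
        ScaleKernel(RBFKernel()),
        inducing_points=train_x[:10].clone(),
        likelihood=GaussianLikelihood(),
    )
    # Evaluate the low-rank (SGPR-style) covariance approximation as a dense tensor.
    return delazify(kernel(train_x, train_x))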
| [((2083, 2102), 'torch.equal', 'torch.equal', (['x1', 'x2'], {}), '(x1, x2)\n', (2094, 2102), False, 'import torch\n'), ((848, 883), 'torch.nn.Parameter', 'torch.nn.Parameter', (['inducing_points'], {}), '(inducing_points)\n', (866, 883), False, 'import torch\n'), ((1790, 1823), 'torch.triangular_solve', 'torch.triangular_solve', (['eye', 'chol'], {}), '(eye, chol)\n', (1812, 1823), False, 'import torch\n'), ((3143, 3162), 'torch.equal', 'torch.equal', (['x1', 'x2'], {}), '(x1, x2)\n', (3154, 3162), False, 'import torch\n'), ((4262, 4293), 'copy.deepcopy', 'copy.deepcopy', (['self.base_kernel'], {}), '(self.base_kernel)\n', (4275, 4293), False, 'import copy\n'), ((4323, 4358), 'copy.deepcopy', 'copy.deepcopy', (['self.inducing_points'], {}), '(self.inducing_points)\n', (4336, 4358), False, 'import copy\n')] |
Jotasenpai/DigitalMediaStoreRESTfull | app/__init__.py | bb776d398e1756b1ff2fd4f392b80479ae29847d | import logging
import os
from flask import Flask
from flask_cors import CORS
from app.extensions import api
from app.extensions.database import db
from app.extensions.schema import ma
from app.views import albums, artists, hello, tracks
def create_app(config, **kwargs):
logging.basicConfig(level=logging.INFO)
app = Flask(__name__, **kwargs)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config)
# app.url_map.strict_slashes = False
with app.app_context():
api.init_app(app)
db.init_app(app)
db.create_all()
ma.init_app(app)
api.register_blueprint(hello.blp)
api.register_blueprint(artists.blp)
api.register_blueprint(albums.blp)
api.register_blueprint(tracks.blp)
try:
os.makedirs(app.instance_path)
except OSError:
pass
return app
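# Minimal sketch of driving the factory (assumption: "config.DevelopmentConfig" is a
# placeholder dotted path; substitute the project's real settings object or path).
def _example_run():
    app = create_app("config.DevelopmentConfig")
    app.run(debug=True)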
| [((280, 319), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (299, 319), False, 'import logging\n'), ((331, 356), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__, **kwargs)\n', (336, 356), False, 'from flask import Flask\n'), ((361, 410), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/api/*': {'origins': '*'}}"}), "(app, resources={'/api/*': {'origins': '*'}})\n", (365, 410), False, 'from flask_cors import CORS\n'), ((526, 543), 'app.extensions.api.init_app', 'api.init_app', (['app'], {}), '(app)\n', (538, 543), False, 'from app.extensions import api\n'), ((553, 569), 'app.extensions.database.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (564, 569), False, 'from app.extensions.database import db\n'), ((578, 593), 'app.extensions.database.db.create_all', 'db.create_all', ([], {}), '()\n', (591, 593), False, 'from app.extensions.database import db\n'), ((603, 619), 'app.extensions.schema.ma.init_app', 'ma.init_app', (['app'], {}), '(app)\n', (614, 619), False, 'from app.extensions.schema import ma\n'), ((629, 662), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['hello.blp'], {}), '(hello.blp)\n', (651, 662), False, 'from app.extensions import api\n'), ((671, 706), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['artists.blp'], {}), '(artists.blp)\n', (693, 706), False, 'from app.extensions import api\n'), ((715, 749), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['albums.blp'], {}), '(albums.blp)\n', (737, 749), False, 'from app.extensions import api\n'), ((758, 792), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['tracks.blp'], {}), '(tracks.blp)\n', (780, 792), False, 'from app.extensions import api\n'), ((811, 841), 'os.makedirs', 'os.makedirs', (['app.instance_path'], {}), '(app.instance_path)\n', (822, 841), False, 'import os\n')] |
SASHA-PAIS/A-Flask-web-app-for-inventory-management | app.py | e6ed1b0d1d06ba04f9930f7653ce0504ecf81dd3 | from flask import Flask, url_for, request, redirect
from flask import render_template as render
from flask_mysqldb import MySQL
import yaml
import json
import MySQLdb
import decimal
class Encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
            return str(obj)
        # Defer to the base class for unsupported types (raises TypeError)
        return super().default(obj)
# Setting up the flask instance
app = Flask(__name__)
# Configure the database
db = yaml.load(open('db.yaml'))
app.config['MYSQL_HOST'] = db['mysql_host']
app.config['MYSQL_USER'] = db['mysql_user']
app.config['MYSQL_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DB'] = db['mysql_db']
mysql = MySQL(app)
link = {x:x for x in ["location", "product", "movement"]}
link["index"] = '/'
def init_database():
cursor = mysql.connection.cursor()
# Initialise all tables
cursor.execute("""
CREATE TABLE IF NOT EXISTS products(prod_id integer primary key auto_increment,
prod_name varchar(20) UNIQUE NOT NULL,
prod_quantity integer not null,
unallocated_quantity integer);
""")
# Might have to create a trigger, let's see!
cursor.execute("""
CREATE TABLE IF NOT EXISTS location(loc_id integer primary key auto_increment,
loc_name varchar(20) unique not null);
""")
cursor.execute("""
CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary key auto_increment,
prod_id INTEGER NOT NULL,
from_loc_id INTEGER NULL,
to_loc_id INTEGER NULL,
prod_quantity INTEGER NOT NULL,
trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY(prod_id) REFERENCES products(prod_id),
FOREIGN KEY(from_loc_id) REFERENCES location(loc_id),
FOREIGN KEY(to_loc_id) REFERENCES location(loc_id));
""")
mysql.connection.commit()
cursor.close()
@app.route('/')
def summary():
init_database()
msg = None
q_data, warehouse, products = None, None, None
cursor = mysql.connection.cursor()
try:
cursor.execute("Select * from location")
warehouse = cursor.fetchall()
cursor.execute("Select * from products")
products = cursor.fetchall()
cursor.execute("""
SELECT prod_name, unallocated_quantity, prod_quantity FROM products
""")
q_data = cursor.fetchall()
    except (MySQLdb.Error, MySQLdb.Warning) as e:
msg = f"An error occured: {e}"
print(msg)
cursor.close()
return render('index.html',link=link, title = "Summary", warehouses = warehouse, products = products, database = q_data)
@app.route('/location.html', methods=['POST', 'GET'])
def location():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * FROM location ORDER BY loc_id")
warehouse_data = cursor.fetchall()
cursor.execute("SELECT loc_name FROM location")
loc_names = cursor.fetchall()
loc_new = []
for i in range(len(loc_names)):
loc_new.append(loc_names[i][0])
if request.method == 'POST':
warehouse_name = request.form['warehouse_name']
warehouse_name = warehouse_name.capitalize()
transaction_allowed = False
if warehouse_name not in ['', ' ', None] and warehouse_name not in loc_new:
transaction_allowed=True
if transaction_allowed:
try:
cursor.execute("INSERT INTO location(loc_name) VALUES(%s)", (warehouse_name,))
mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
msg = f"An error occured: {e}"
else:
msg = f"{warehouse_name} added succcessfully"
if msg:
print(msg)
cursor.close()
return redirect(url_for('location'))
return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = "Warehouse Locations")
@app.route('/product.html', methods=['POST', 'GET'])
def product():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * from products")
products = cursor.fetchall()
cursor.execute("SELECT prod_name FROM products")
prod_names = cursor.fetchall()
prod_new = []
for i in range(len(prod_names)):
prod_new.append(prod_names[i][0])
if request.method == 'POST':
prod_name = request.form['prod_name']
quantity = request.form['prod_quantity']
prod_name = prod_name.capitalize()
transaction_allowed = False
if prod_name not in ['', ' ', None] and prod_name not in prod_new:
if quantity not in ['', ' ', None]:
transaction_allowed= True
if transaction_allowed:
try:
cursor.execute("INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)", (prod_name, quantity, quantity))
mysql.connection.commit()
            except (MySQLdb.Error, MySQLdb.Warning) as e:
msg = f"An error occured: {e}"
else:
msg = f"{prod_name} added succcessfully"
if msg:
print(msg)
cursor.close()
return redirect(url_for('product'))
return render('product.html', link=link, products = products, transaction_message=msg, title="Products Log")
@app.route('/movement.html', methods=['POST', 'GET'])
def movement():
init_database()
msg=None
cursor = mysql.connection.cursor()
cursor.execute("SELECT * FROM logistics")
logistics_data = cursor.fetchall()
cursor.execute("SELECT prod_id, prod_name, unallocated_quantity FROM products")
products = cursor.fetchall()
cursor.execute("SELECT loc_id, loc_name FROM location")
locations = cursor.fetchall()
# products - ((1, 'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave', 50))
# x in product - (1, 'Piano', 250)
# x[0] = 1
# for p_id in [x[0] for x in products]:
# print(p_id)
# 1
# 2
# 6
# 7
# print(locations)
# for l_id in [x[0] for x in locations]:
# print(l_id)
# ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry'))
# 20
# 19
# 26
# 17
log_summary = []
for p_id in [x[0] for x in products]:
cursor.execute("SELECT prod_name FROM products WHERE prod_id = %s", str(p_id,))
temp_prod_name = cursor.fetchone()
#print(temp_prod_name) ('Piano',)
for l_id in [x[0] for x in locations]:
cursor.execute("SELECT loc_name FROM location WHERE loc_id = %s", (l_id,)) #str(l_id,) giving an error
temp_loc_name = cursor.fetchone()
# print(temp_loc_name) - (Andaman,)
#e.g. prod_id = 1 = piano, loc_id = 1 = andaman
cursor.execute("""
SELECT SUM(log.prod_quantity)
FROM logistics log
WHERE log.prod_id = %s AND log.to_loc_id = %s
""", (p_id, l_id))
sum_to_loc = cursor.fetchone() # No.of pianos that enter andaman
cursor.execute("""
SELECT SUM(log.prod_quantity)
FROM logistics log
WHERE log.prod_id = %s AND log.from_loc_id = %s
""", (p_id, l_id))
sum_from_loc = cursor.fetchone() # No. of pianos that leave andaman
# print(sum_from_loc)
if sum_from_loc[0] is None: #e.g. (None,) --> (0,) --> No pianos leave andaman
sum_from_loc = (0,)
if sum_to_loc[0] is None: #No pianos enter andaman
sum_to_loc = (0,)
#how much enters andaman - how much leaves andaman = how much remains (allocated) in andaman
# log_summary += [(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR
log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,) + (Andaman,), (0,) = ('Piano', 'Andaman', 0)
#print(log_summary)
# [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0),
# ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0),
# ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0),
# ('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)]
alloc_json = {}
for row in log_summary:
try:
if row[1] in alloc_json[row[0]].keys(): #Check if Andaman exists in Piano ka keys, Check if Assam, exists in Piano ka keys, etc.
alloc_json[row[0]][row[1]] += row[2] #If yes, the add the quantity to the previous quantity
else:
alloc_json[row[0]][row[1]] = row[2] #If no, add it as a new quantity
except (KeyError, TypeError):
alloc_json[row[0]] = {} #Make the value of piano empty
alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as a new value in the dictionary
#print(alloc_json)
# {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Washing machine': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0},
# 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}}
alloc_json = json.dumps(alloc_json, cls = Encoder)
# print(alloc_json)
# {"Piano": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Iphone xr": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Washing machine": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0},
# "Microwave": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}}
if request.method == 'POST':
# transaction times are stored in UTC
prod_name = request.form['prod_name']
from_loc = request.form['from_loc']
to_loc = request.form['to_loc']
quantity = request.form['quantity']
# if no 'from loc' is given, that means the product is being shipped to a warehouse (init condition)
if from_loc in [None, '', ' ']:
try:
cursor.execute("""
INSERT INTO logistics(prod_id, to_loc_id, prod_quantity)
SELECT products.prod_id, location.loc_id, %s
FROM products, location
WHERE products.prod_name = %s AND location.loc_name = %s
""", (quantity, prod_name, to_loc))
# IMPORTANT to maintain consistency
cursor.execute("""
UPDATE products
SET unallocated_quantity = unallocated_quantity - %s
WHERE prod_name = %s
""", (quantity, prod_name))
mysql.connection.commit()
except (MySQLdb.Error, MySQLdb.Warning) as e:
msg = f"An error occured: {e}"
else:
msg = "Transaction added successfully"
elif to_loc in [None, '', ' ']:
print("To Location wasn't specified, will be unallocated")
try:
cursor.execute("""
INSERT INTO logistics(prod_id, from_loc_id, prod_quantity)
SELECT products.prod_id, location.loc_id, %s
FROM products, location
WHERE products.prod_name = %s AND location.loc_name = %s
""", (quantity, prod_name, from_loc))
#Important to maintain consistency
cursor.execute("""
UPDATE products
SET unallocated_quantity = unallocated_quantity + %s
WHERE prod_name = %s
""", (quantity, prod_name))
mysql.connection.commit()
except(MySQLdb.Error, MySQLdb.Warning) as e:
msg=f"An error occurred: {e}"
else:
msg = "Transaction added successfully"
# if 'from loc' and 'to_loc' given the product is being shipped between warehouses
else:
try:
cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (from_loc,))
from_loc = ''.join([str(x[0]) for x in cursor.fetchall()])
# cursor.fetchall -> ((1,)), x -> (1,) x[0] -> 1 join converts 1 into a string
cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (to_loc,))
to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ])
cursor.execute("SELECT prod_id FROM products WHERE prod_name = %s", (prod_name,))
prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ])
cursor.execute("""
INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity)
VALUES(%s, %s, %s, %s)
""", (prod_id, from_loc, to_loc, quantity))
mysql.connection.commit()
except(MySQLdb.Error, MySQLdb.Warning) as e:
msg=f"An error occurred: {e}"
else:
msg = "Transaction added successfully"
#Print a transaction message if exists!
if msg:
print(msg)
cursor.close()
return redirect(url_for('movement'))
return render('movement.html', title = "Product Movement", link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs = logistics_data, database = log_summary)
@app.route('/delete')
def delete():
# Make sure that the queries are working properly....I'm having some doubts about the datatypes
type_ = request.args.get('type')
cursor = mysql.connection.cursor()
if type_ == 'location':
id_ = request.args.get('loc_id')
cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s GROUP BY prod_id", (id_,))
in_place = cursor.fetchall()
cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id = %s GROUP BY prod_id", (id_,))
out_place = cursor.fetchall()
#Convert list of tuples to dict
in_place = dict(in_place)
out_place = dict(out_place)
all_place = {}
#Inplace = {1:20, 3:2000} - keys - prod_id - toloc = mumbai
#out_place = {3:100} - keys - prod_id - fromloc = mumbai
for x in in_place.keys(): #calculator entered mumbai
if x in out_place.keys(): #calculator left mumbai
all_place[x] = in_place[x] - out_place[x] #2000 fridges came to mumbai from kolkata, 100 fridges were sent to daman diu, therefore, 1900 remains in mumbai which will be unallocated if mumbai is deleted
else:
all_place[x] = in_place[x]
for products_ in all_place.keys():
cursor.execute("""
UPDATE products SET unallocated_quantity = unallocated_quantity + %s WHERE prod_id = %s
""", (all_place[products_], products_))
cursor.execute("DELETE FROM location where loc_id = %s", (id_,))
mysql.connection.commit()
cursor.close()
return redirect(url_for('location'))
elif type_ == 'product':
id_ = request.args.get('prod_id')
cursor.execute("DELETE FROM products WHERE prod_id = %s", (id_,))
mysql.connection.commit()
cursor.close()
return redirect(url_for('product'))
@app.route('/edit', methods=['POST', 'GET'])
def edit():
# Try capitalize()
type_ = request.args.get('type')
cursor = mysql.connection.cursor()
cursor.execute("SELECT loc_name FROM location")
loc_names = cursor.fetchall()
loc_new = []
for i in range(len(loc_names)):
loc_new.append(loc_names[i][0])
cursor.execute("SELECT prod_name FROM products")
prod_names = cursor.fetchall()
prod_new = []
for i in range(len(prod_names)):
prod_new.append(prod_names[i][0])
if type_ == 'location' and request.method == 'POST':
loc_id = request.form['loc_id']
loc_name = request.form['loc_name']
loc_name = loc_name.capitalize()
if loc_name not in ['', ' ', None] and loc_name not in loc_new:
cursor.execute("UPDATE location SET loc_name = %s WHERE loc_id = %s", (loc_name, loc_id))
mysql.connection.commit()
cursor.close()
return redirect(url_for('location'))
elif type_ == 'product' and request.method == 'POST':
prod_id = request.form['product_id']
prod_name = request.form['prod_name']
prod_quantity = request.form['prod_quantity']
prod_name = prod_name.capitalize()
if prod_name not in ['', ' ', None] and prod_name not in prod_new:
cursor.execute("UPDATE products SET prod_name = %s WHERE prod_id = %s", (prod_name, str(prod_id)))
if prod_quantity not in ['', ' ', None] and prod_name not in prod_new:
cursor.execute("SELECT prod_quantity FROM products WHERE prod_id = %s", (prod_id,))
old_prod_quantity = cursor.fetchone()[0]
cursor.execute("""
UPDATE products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s - %s
WHERE prod_id = %s
""", (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id)))
mysql.connection.commit()
cursor.close()
return redirect(url_for('product'))
return render(url_for(type_))
if __name__ == '__main__':
app.run(debug=True) | [((359, 374), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'from flask import Flask, url_for, request, redirect\n'), ((622, 632), 'flask_mysqldb.MySQL', 'MySQL', (['app'], {}), '(app)\n', (627, 632), False, 'from flask_mysqldb import MySQL\n'), ((2494, 2604), 'flask.render_template', 'render', (['"""index.html"""'], {'link': 'link', 'title': '"""Summary"""', 'warehouses': 'warehouse', 'products': 'products', 'database': 'q_data'}), "('index.html', link=link, title='Summary', warehouses=warehouse,\n products=products, database=q_data)\n", (2500, 2604), True, 'from flask import render_template as render\n'), ((3868, 3987), 'flask.render_template', 'render', (['"""location.html"""'], {'link': 'link', 'warehouses': 'warehouse_data', 'transaction_message': 'msg', 'title': '"""Warehouse Locations"""'}), "('location.html', link=link, warehouses=warehouse_data,\n transaction_message=msg, title='Warehouse Locations')\n", (3874, 3987), True, 'from flask import render_template as render\n'), ((5358, 5462), 'flask.render_template', 'render', (['"""product.html"""'], {'link': 'link', 'products': 'products', 'transaction_message': 'msg', 'title': '"""Products Log"""'}), "('product.html', link=link, products=products, transaction_message=\n msg, title='Products Log')\n", (5364, 5462), True, 'from flask import render_template as render\n'), ((9762, 9797), 'json.dumps', 'json.dumps', (['alloc_json'], {'cls': 'Encoder'}), '(alloc_json, cls=Encoder)\n', (9772, 9797), False, 'import json\n'), ((13811, 14005), 'flask.render_template', 'render', (['"""movement.html"""'], {'title': '"""Product Movement"""', 'link': 'link', 'trans_message': 'msg', 'products': 'products', 'locations': 'locations', 'allocated': 'alloc_json', 'logs': 'logistics_data', 'database': 'log_summary'}), "('movement.html', title='Product Movement', link=link, trans_message=\n msg, products=products, locations=locations, allocated=alloc_json, logs\n =logistics_data, database=log_summary)\n", (13817, 14005), True, 'from flask import render_template as render\n'), ((14155, 14179), 'flask.request.args.get', 'request.args.get', (['"""type"""'], {}), "('type')\n", (14171, 14179), False, 'from flask import Flask, url_for, request, redirect\n'), ((16041, 16065), 'flask.request.args.get', 'request.args.get', (['"""type"""'], {}), "('type')\n", (16057, 16065), False, 'from flask import Flask, url_for, request, redirect\n'), ((14262, 14288), 'flask.request.args.get', 'request.args.get', (['"""loc_id"""'], {}), "('loc_id')\n", (14278, 14288), False, 'from flask import Flask, url_for, request, redirect\n'), ((17988, 18002), 'flask.url_for', 'url_for', (['type_'], {}), '(type_)\n', (17995, 18002), False, 'from flask import Flask, url_for, request, redirect\n'), ((2352, 2378), 'MySQLdb.Error', 'MySQLdb.Error', (['(not Warning)'], {}), '(not Warning)\n', (2365, 2378), False, 'import MySQLdb\n'), ((2380, 2397), 'MySQLdb.Warning', 'MySQLdb.Warning', ([], {}), '()\n', (2395, 2397), False, 'import MySQLdb\n'), ((13777, 13796), 'flask.url_for', 'url_for', (['"""movement"""'], {}), "('movement')\n", (13784, 13796), False, 'from flask import Flask, url_for, request, redirect\n'), ((15675, 15694), 'flask.url_for', 'url_for', (['"""location"""'], {}), "('location')\n", (15682, 15694), False, 'from flask import Flask, url_for, request, redirect\n'), ((15740, 15767), 'flask.request.args.get', 'request.args.get', (['"""prod_id"""'], {}), "('prod_id')\n", (15756, 15767), False, 'from flask import Flask, url_for, 
request, redirect\n'), ((16920, 16939), 'flask.url_for', 'url_for', (['"""location"""'], {}), "('location')\n", (16927, 16939), False, 'from flask import Flask, url_for, request, redirect\n'), ((3835, 3854), 'flask.url_for', 'url_for', (['"""location"""'], {}), "('location')\n", (3842, 3854), False, 'from flask import Flask, url_for, request, redirect\n'), ((5326, 5344), 'flask.url_for', 'url_for', (['"""product"""'], {}), "('product')\n", (5333, 5344), False, 'from flask import Flask, url_for, request, redirect\n'), ((15924, 15942), 'flask.url_for', 'url_for', (['"""product"""'], {}), "('product')\n", (15931, 15942), False, 'from flask import Flask, url_for, request, redirect\n'), ((17949, 17967), 'flask.url_for', 'url_for', (['"""product"""'], {}), "('product')\n", (17956, 17967), False, 'from flask import Flask, url_for, request, redirect\n'), ((3550, 3576), 'MySQLdb.Error', 'MySQLdb.Error', (['(not Warning)'], {}), '(not Warning)\n', (3563, 3576), False, 'import MySQLdb\n'), ((3578, 3595), 'MySQLdb.Warning', 'MySQLdb.Warning', ([], {}), '()\n', (3593, 3595), False, 'import MySQLdb\n'), ((5046, 5072), 'MySQLdb.Error', 'MySQLdb.Error', (['(not Warning)'], {}), '(not Warning)\n', (5059, 5072), False, 'import MySQLdb\n'), ((5074, 5091), 'MySQLdb.Warning', 'MySQLdb.Warning', ([], {}), '()\n', (5089, 5091), False, 'import MySQLdb\n')] |
zmxdream/Paddle | python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
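# Dynamic-shape case: reduce_mean over dims [2, -1] with keep_dim, fed into batch_norm, run through TensorRT in FP32.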
class TRTReduceMeanTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, -1, -1], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(
data, dim=[2, -1], keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([3, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanTest.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({
'data': [1, 3, 16, 16]
}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
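# Same dynamic-shape setup as above, but reduce_mean over all dims (no dim argument).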
class TRTReduceMeanAllNoBatchTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, -1, -1], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([3, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam(
{
'data': [1, 3, 16, 16]
}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
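# Dynamic-shape reduce_mean over dims [2, -1], run with FP16 (Half) precision.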
class TRTReduceMeanTestFP16(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, -1, -1], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(
data, dim=[2, -1], keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([3, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False)
self.fetch_list = [out]
self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({
'data': [1, 3, 16, 16]
}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
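# Reduce over all dims; spatial size fixed at 56x56 with a dynamic batch dimension, FP32.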
class TRTReduceMeanAllTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[-1, 3, 56, 56], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([3, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({
'data': [1, 3, 56, 56]
}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False)
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
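# Fully static input shape [3, 3, 56, 56], reduce_mean over dims [2, -1]; no DynamicShapeParam is set.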
class TRTReduceMeanTestStatic(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[3, 3, 56, 56], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(
data, dim=[2, -1], keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([3, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
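# Static input shape [4, 3, 56, 56], reduce_mean over all dims, FP32.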
class TRTReduceMeanStaticAllTest(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[4, 3, 56, 56], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([4, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False)
self.fetch_list = [out]
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
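# Static input shape [4, 3, 56, 56], reduce_mean over all dims, FP16 (Half) precision.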
class TRTReduceMeanStaticFP16(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[4, 3, 56, 56], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([4, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False)
self.fetch_list = [out]
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
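# Same static-shape FP16 all-dims case as above; differs only in the fifth TensorRTParam flag (use_static) being True.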
class TRTReduceMeanFP16Static(InferencePassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[4, 3, 56, 56], dtype="float32")
reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True)
out = fluid.layers.batch_norm(reduce_mean, is_test=True)
self.feeds = {
"data": np.random.random([4, 3, 56, 56]).astype("float32"),
}
self.enable_trt = True
self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam(
1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False)
self.fetch_list = [out]
def test_check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
if __name__ == "__main__":
unittest.main()
| [((9538, 9553), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9551, 9553), False, 'import unittest\n'), ((1836, 1864), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1862, 1864), True, 'import paddle.fluid.core as core\n'), ((3028, 3056), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (3054, 3056), True, 'import paddle.fluid.core as core\n'), ((4208, 4236), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (4234, 4236), True, 'import paddle.fluid.core as core\n'), ((5358, 5386), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (5384, 5386), True, 'import paddle.fluid.core as core\n'), ((6361, 6389), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (6387, 6389), True, 'import paddle.fluid.core as core\n'), ((7340, 7368), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (7366, 7368), True, 'import paddle.fluid.core as core\n'), ((8310, 8338), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (8336, 8338), True, 'import paddle.fluid.core as core\n'), ((9279, 9307), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (9305, 9307), True, 'import paddle.fluid.core as core\n'), ((971, 1031), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (990, 1031), True, 'import paddle.fluid as fluid\n'), ((1052, 1115), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[-1, 3, -1, -1]', 'dtype': '"""float32"""'}), "(name='data', shape=[-1, 3, -1, -1], dtype='float32')\n", (1062, 1115), True, 'import paddle.fluid as fluid\n'), ((1159, 1217), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'dim': '[2, -1]', 'keep_dim': '(True)'}), '(data, dim=[2, -1], keep_dim=True)\n', (1183, 1217), True, 'import paddle.fluid as fluid\n'), ((1253, 1303), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (1276, 1303), True, 'import paddle.fluid as fluid\n'), ((2152, 2212), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (2171, 2212), True, 'import paddle.fluid as fluid\n'), ((2233, 2296), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[-1, 3, -1, -1]', 'dtype': '"""float32"""'}), "(name='data', shape=[-1, 3, -1, -1], dtype='float32')\n", (2243, 2296), True, 'import paddle.fluid as fluid\n'), ((2340, 2385), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'keep_dim': '(True)'}), '(data, keep_dim=True)\n', (2364, 2385), True, 'import paddle.fluid as fluid\n'), ((2404, 2454), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (2427, 2454), True, 'import paddle.fluid as fluid\n'), ((3338, 3398), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (3357, 3398), True, 'import paddle.fluid as fluid\n'), ((3419, 3482), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[-1, 3, -1, -1]', 'dtype': 
'"""float32"""'}), "(name='data', shape=[-1, 3, -1, -1], dtype='float32')\n", (3429, 3482), True, 'import paddle.fluid as fluid\n'), ((3526, 3584), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'dim': '[2, -1]', 'keep_dim': '(True)'}), '(data, dim=[2, -1], keep_dim=True)\n', (3550, 3584), True, 'import paddle.fluid as fluid\n'), ((3620, 3670), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (3643, 3670), True, 'import paddle.fluid as fluid\n'), ((4517, 4577), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (4536, 4577), True, 'import paddle.fluid as fluid\n'), ((4598, 4661), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[-1, 3, 56, 56]', 'dtype': '"""float32"""'}), "(name='data', shape=[-1, 3, 56, 56], dtype='float32')\n", (4608, 4661), True, 'import paddle.fluid as fluid\n'), ((4705, 4750), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'keep_dim': '(True)'}), '(data, keep_dim=True)\n', (4729, 4750), True, 'import paddle.fluid as fluid\n'), ((4769, 4819), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (4792, 4819), True, 'import paddle.fluid as fluid\n'), ((5670, 5730), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (5689, 5730), True, 'import paddle.fluid as fluid\n'), ((5751, 5813), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[3, 3, 56, 56]', 'dtype': '"""float32"""'}), "(name='data', shape=[3, 3, 56, 56], dtype='float32')\n", (5761, 5813), True, 'import paddle.fluid as fluid\n'), ((5857, 5915), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'dim': '[2, -1]', 'keep_dim': '(True)'}), '(data, dim=[2, -1], keep_dim=True)\n', (5881, 5915), True, 'import paddle.fluid as fluid\n'), ((5951, 6001), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (5974, 6001), True, 'import paddle.fluid as fluid\n'), ((6676, 6736), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (6695, 6736), True, 'import paddle.fluid as fluid\n'), ((6757, 6819), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[4, 3, 56, 56]', 'dtype': '"""float32"""'}), "(name='data', shape=[4, 3, 56, 56], dtype='float32')\n", (6767, 6819), True, 'import paddle.fluid as fluid\n'), ((6863, 6908), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'keep_dim': '(True)'}), '(data, keep_dim=True)\n', (6887, 6908), True, 'import paddle.fluid as fluid\n'), ((6927, 6977), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (6950, 6977), True, 'import paddle.fluid as fluid\n'), ((7652, 7712), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (7671, 7712), True, 'import paddle.fluid as fluid\n'), ((7733, 7795), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[4, 3, 56, 56]', 'dtype': 
'"""float32"""'}), "(name='data', shape=[4, 3, 56, 56], dtype='float32')\n", (7743, 7795), True, 'import paddle.fluid as fluid\n'), ((7839, 7884), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'keep_dim': '(True)'}), '(data, keep_dim=True)\n', (7863, 7884), True, 'import paddle.fluid as fluid\n'), ((7903, 7953), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (7926, 7953), True, 'import paddle.fluid as fluid\n'), ((8622, 8682), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (8641, 8682), True, 'import paddle.fluid as fluid\n'), ((8703, 8765), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[4, 3, 56, 56]', 'dtype': '"""float32"""'}), "(name='data', shape=[4, 3, 56, 56], dtype='float32')\n", (8713, 8765), True, 'import paddle.fluid as fluid\n'), ((8809, 8854), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['data'], {'keep_dim': '(True)'}), '(data, keep_dim=True)\n', (8833, 8854), True, 'import paddle.fluid as fluid\n'), ((8873, 8923), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['reduce_mean'], {'is_test': '(True)'}), '(reduce_mean, is_test=True)\n', (8896, 8923), True, 'import paddle.fluid as fluid\n'), ((2003, 2060), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (2034, 2060), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((3195, 3252), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (3226, 3252), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((4375, 4432), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (4406, 4432), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((5525, 5582), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (5556, 5582), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((6528, 6585), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (6559, 6585), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((7507, 7564), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (7538, 7564), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((8477, 8534), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (8508, 8534), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((9446, 9503), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (9477, 9503), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((1348, 1380), 'numpy.random.random', 'np.random.random', (['[3, 3, 56, 56]'], {}), '([3, 3, 56, 56])\n', (1364, 1380), True, 'import numpy 
as np\n'), ((2499, 2531), 'numpy.random.random', 'np.random.random', (['[3, 3, 56, 56]'], {}), '([3, 3, 56, 56])\n', (2515, 2531), True, 'import numpy as np\n'), ((3715, 3747), 'numpy.random.random', 'np.random.random', (['[3, 3, 56, 56]'], {}), '([3, 3, 56, 56])\n', (3731, 3747), True, 'import numpy as np\n'), ((4864, 4896), 'numpy.random.random', 'np.random.random', (['[3, 3, 56, 56]'], {}), '([3, 3, 56, 56])\n', (4880, 4896), True, 'import numpy as np\n'), ((6046, 6078), 'numpy.random.random', 'np.random.random', (['[3, 3, 56, 56]'], {}), '([3, 3, 56, 56])\n', (6062, 6078), True, 'import numpy as np\n'), ((7022, 7054), 'numpy.random.random', 'np.random.random', (['[4, 3, 56, 56]'], {}), '([4, 3, 56, 56])\n', (7038, 7054), True, 'import numpy as np\n'), ((7998, 8030), 'numpy.random.random', 'np.random.random', (['[4, 3, 56, 56]'], {}), '([4, 3, 56, 56])\n', (8014, 8030), True, 'import numpy as np\n'), ((8968, 9000), 'numpy.random.random', 'np.random.random', (['[4, 3, 56, 56]'], {}), '([4, 3, 56, 56])\n', (8984, 9000), True, 'import numpy as np\n')] |
SeHwanJoo/mmdetection_vinbig | configs/vinbig/detectors_resnext.py | 9a27d2b5cd8b3ec9ed1a94e4704a7c883f15dce3 | _base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'./dataset_base.py',
'./scheduler_base.py',
'../_base_/default_runtime.py'
]
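# DetectoRS-style Cascade R-CNN: ResNeXt-101 32x4d backbone with switchable atrous convolution (SAC),
# a GeneralizedAttention plugin in the last two stages, and a Recursive Feature Pyramid (RFP) neck.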
model = dict(
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='DetectoRS_ResNeXt',
pretrained='open-mmlab://resnext101_32x4d',
depth=101,
groups=32,
base_width=4,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True,
plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
in_channels=512,
position='after_conv2')
]
),
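    # RFP neck with two unrolled steps; the auxiliary rfp_backbone mirrors the main ResNeXt-101 backbone, also using SAC.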
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
pretrained='open-mmlab://resnext101_32x4d',
style='pytorch')),
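    # Three cascade bbox heads, each predicting the 14 classes of the VinBig dataset.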
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
),
dict(
type='Shared2FCBBoxHead',
num_classes=14
)
]
),
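    # Test-time settings: RPN NMS threshold 0.7; R-CNN keeps all boxes (score_thr=0.0) and applies NMS at IoU 0.4.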
test_cfg=dict(
rpn=dict(
nms_thr=0.7
),
rcnn=dict(
score_thr=0.0,
nms=dict(type='nms', iou_threshold=0.4)
)
)
)
| [] |
johnchase/scikit-bio | skbio/draw/tests/test_distributions.py | 340e6153b6c93053d923d344e63481860e03731e | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
from skbio.draw import boxplots, grouped_distributions
from skbio.draw._distributions import (
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot,
_create_legend, _get_distribution_markers, _is_single_matplotlib_color,
_plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options,
_set_figure_size, _validate_input, _validate_x_values)
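# Tests cover the public boxplots/grouped_distributions API as well as the private plotting helpers imported above.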
class DistributionsTests(TestCase):
def setUp(self):
# Test null data list.
self.Null = None
# Test empty data list.
self.Empty = []
# Test nested empty data list.
self.EmptyNested = [[]]
# Test nested empty data list (for bar/scatter plots).
self.EmptyDeeplyNested = [[[]]]
# Test invalid number of samples in data list (for bar/scatter plots).
self.InvalidNumSamples = [[[1, 2, 3, 4, 5]],
[[4, 5, 6, 7, 8], [2, 3, 2]],
[[4, 7, 10, 33, 32, 6, 7, 8]]]
# Test valid data with three samples and four data points
# (for bar/scatter plots).
self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]],
[[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]],
[[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]],
[[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]]
# Test valid data with one sample (for bar/scatter plots).
self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]],
[[4, 5, 6, 7, 8]],
[[4, 7, 10, 33, 32, 6, 7, 8]]]
# Test typical data to be plotted by the boxplot function.
self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99],
[2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8],
[2, 9, 7, 5, 6]]
def tearDown(self):
# We get a warning from mpl if we don't clean up our figures.
plt.close('all')
def test_validate_input_null(self):
with npt.assert_raises(ValueError):
_validate_input(self.Null, None, None, None)
def test_validate_input_empty(self):
with npt.assert_raises(ValueError):
_validate_input(self.Empty, None, None, None)
def test_validate_input_empty_nested(self):
with npt.assert_raises(ValueError):
_validate_input(self.EmptyNested, None, None, None)
def test_validate_input_empty_deeply_nested(self):
num_points, num_samples = _validate_input(self.EmptyDeeplyNested,
None, None, None)
self.assertEqual(num_points, 1)
self.assertEqual(num_samples, 1)
def test_validate_input_empty_point(self):
with npt.assert_raises(ValueError):
_validate_input([[[1, 2, 3], [4, 5]], []], None, None, None)
def test_validate_input_invalid_num_samples(self):
with npt.assert_raises(ValueError):
_validate_input(self.InvalidNumSamples, None, None, None)
def test_validate_input_invalid_data_point_names(self):
with npt.assert_raises(ValueError):
_validate_input(self.ValidSingleSampleData, None, ["T0", "T1"],
None)
def test_validate_input_invalid_sample_names(self):
with npt.assert_raises(ValueError):
_validate_input(self.ValidSingleSampleData, None, None,
["Men", "Women"])
def test_validate_input_all_valid_input(self):
self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8],
["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"]),
(4, 3))
def test_validate_x_values_invalid_x_values(self):
with npt.assert_raises(ValueError):
_validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"],
len(self.ValidSingleSampleData))
def test_validate_x_values_invalid_x_tick_labels(self):
with npt.assert_raises(ValueError):
_validate_x_values(None, ["T0"], len(self.ValidSingleSampleData))
def test_validate_x_values_nonnumber_x_values(self):
with npt.assert_raises(ValueError):
_validate_x_values(["foo", 2, 3], None,
len(self.ValidSingleSampleData))
def test_validate_x_values_valid_x_values(self):
_validate_x_values([1, 2.0, 3], None, 3)
def test_get_distribution_markers_null_marker_list(self):
self.assertEqual(_get_distribution_markers('colors', None, 5),
['b', 'g', 'r', 'c', 'm'])
def test_get_distribution_markers_empty_marker_list(self):
self.assertEqual(_get_distribution_markers('colors', None, 4),
['b', 'g', 'r', 'c'])
def test_get_distribution_markers_insufficient_markers(self):
self.assertEqual(npt.assert_warns(RuntimeWarning,
_get_distribution_markers,
'colors', None, 10),
['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r'])
self.assertEqual(npt.assert_warns(RuntimeWarning,
_get_distribution_markers,
'symbols', ['^', '>', '<'], 5),
['^', '>', '<', '^', '>'])
def test_get_distribution_markers_bad_marker_type(self):
with npt.assert_raises(ValueError):
_get_distribution_markers('shapes', [], 3)
def test_get_distribution_markers_zero_markers(self):
self.assertEqual(_get_distribution_markers('symbols', None, 0), [])
self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), [])
def test_get_distribution_markers_negative_num_markers(self):
with npt.assert_raises(ValueError):
_get_distribution_markers('symbols', [], -1)
def test_plot_bar_data(self):
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')
self.assertEqual(result[0].__class__.__name__, "Rectangle")
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0].get_width(), 0.5)
self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertAlmostEqual(result[0].get_height(), 2.0)
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')
self.assertEqual(result[0].__class__.__name__, "Rectangle")
self.assertEqual(len(result), 1)
self.assertAlmostEqual(result[0].get_width(), 0.5)
self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertAlmostEqual(result[0].get_height(), 2.0)
def test_plot_bar_data_bad_error_bar_type(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')
def test_plot_bar_data_empty(self):
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')
self.assertTrue(result is None)
fig, ax = plt.subplots()
result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')
self.assertTrue(result is None)
def test_plot_scatter_data(self):
fig, ax = plt.subplots()
result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')
self.assertEqual(result.get_sizes(), 20)
def test_plot_scatter_data_empty(self):
fig, ax = plt.subplots()
result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv')
self.assertTrue(result is None)
def test_plot_box_data(self):
fig, ax = plt.subplots()
result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55,
1.5, 'stdv')
self.assertEqual(result.__class__.__name__, "dict")
self.assertEqual(len(result['boxes']), 1)
self.assertEqual(len(result['medians']), 1)
self.assertEqual(len(result['whiskers']), 2)
# mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one,
# though the resulting plot looks identical between the two versions.
# see:
# https://github.com/pydata/pandas/issues/8382#issuecomment-56840974
# https://github.com/matplotlib/matplotlib/issues/3544
self.assertTrue(len(result['fliers']) == 1 or
len(result['fliers']) == 2)
self.assertEqual(len(result['caps']), 2)
def test_plot_box_data_empty(self):
fig, ax = plt.subplots()
result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')
self.assertTrue(result is None)
def test_calc_data_point_locations_invalid_x_values(self):
with npt.assert_raises(ValueError):
_calc_data_point_locations(3, [1, 10.5])
def test_calc_data_point_locations_default_spacing(self):
locs = _calc_data_point_locations(4)
np.testing.assert_allclose(locs, [1, 2, 3, 4])
def test_calc_data_point_locations_custom_spacing(self):
# Scaling down from 3..12 to 1..4.
locs = _calc_data_point_locations(4, [3, 4, 10, 12])
np.testing.assert_allclose(locs,
np.array([1, 1.33333333, 3.33333333, 4]))
# Sorted order shouldn't affect scaling.
locs = _calc_data_point_locations(4, [4, 3, 12, 10])
np.testing.assert_allclose(locs,
np.array([1.33333333, 1, 4, 3.33333333]))
# Scaling up from 0.001..0.87 to 1..3.
locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87])
np.testing.assert_allclose(locs,
np.array([1, 1.58296893, 3]))
def test_calc_data_point_ticks(self):
ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False)
np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25])
ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False)
np.testing.assert_allclose(ticks, [0.75])
def test_set_axes_options(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1"])
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
def test_set_axes_options_ylim(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1)
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0")
self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1")
self.assertEqual(ax.get_ylim(), (0.0, 1.0))
def test_set_axes_options_x_values_as_tick_labels(self):
fig, ax = plt.subplots()
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_values=[42, 45, 800])
self.assertEqual(ax.get_title(), "Plot Title")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(ax.get_xticklabels()[0].get_text(), '42')
self.assertEqual(ax.get_xticklabels()[1].get_text(), '45')
self.assertEqual(ax.get_xticklabels()[2].get_text(), '800')
def test_set_axes_options_bad_ylim(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1", "T2"], y_min='car',
y_max=30)
def test_set_axes_options_invalid_x_tick_labels_orientation(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label",
x_tick_labels=["T0", "T1"],
x_tick_labels_orientation='brofist')
def test_create_legend(self):
fig, ax = plt.subplots()
_create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')
self.assertEqual(len(ax.get_legend().get_texts()), 2)
fig, ax = plt.subplots()
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
'symbols')
self.assertEqual(len(ax.get_legend().get_texts()), 3)
def test_create_legend_invalid_input(self):
fig, ax = plt.subplots()
with npt.assert_raises(ValueError):
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')
with npt.assert_raises(ValueError):
_create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'],
'foo')
def test_grouped_distributions_bar(self):
fig = grouped_distributions('bar', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'r', 'g'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.1125, 2.0125, 3.8125, 4.1125])
def test_grouped_distributions_insufficient_colors(self):
args = ('bar', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['b', 'r'], "x-axis label", "y-axis label", "Test")
npt.assert_warns(RuntimeWarning,
grouped_distributions,
*args)
def test_grouped_distributions_scatter(self):
fig = grouped_distributions('scatter', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['^', '>', '<'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.075, 1.975, 3.775, 4.075])
def test_grouped_distributions_insufficient_symbols(self):
args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['^'], "x-axis label", "y-axis label", "Test")
npt.assert_warns(RuntimeWarning, grouped_distributions, *args)
def test_grouped_distributions_empty_marker_list(self):
grouped_distributions('scatter', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"], [],
"x-axis label", "y-axis label", "Test")
def test_grouped_distributions_box(self):
fig = grouped_distributions('box', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'g', 'y'], "x-axis label",
"y-axis label", "Test")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 4)
np.testing.assert_allclose(ax.get_xticks(),
[1.075, 1.975, 3.775, 4.075])
def test_grouped_distributions_error(self):
with npt.assert_raises(ValueError):
grouped_distributions('pie', self.ValidTypicalData,
[1, 4, 10, 11], ["T0", "T1", "T2", "T3"],
["Infants", "Children", "Teens"],
['b', 'g', 'y'],
"x-axis label", "y-axis label", "Test")
def test_grouped_distributions_negative_distribution_width(self):
args = ('box', self.ValidTypicalData, [1, 4, 10, 11],
["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"],
['b', 'g', 'y'], "x-axis label", "y-axis label", "Test")
with self.assertRaises(ValueError):
grouped_distributions(*args, distribution_width=0)
with self.assertRaises(ValueError):
grouped_distributions(*args, distribution_width=-42)
def test_boxplots(self):
fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label",
legend=(('blue', 'red'), ('foo', 'bar')))
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
def test_boxplots_empty_distributions(self):
fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
# second distribution (empty) should have nans since it is hidden.
        # Boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has
        # 7. In either case, the line at index 8 should have a nan for its y
        # value.
lines = ax.get_lines()
self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
# line in first distribution should *not* have nan for its y value
self.assertFalse(np.isnan(lines[0].get_xydata()[0][1]))
# All distributions are empty.
fig = boxplots([[], [], []], [1, 4, 10],
["Data 1", "Data 2", "Data 3"], "Test", "x-axis label",
"y-axis label")
ax = fig.get_axes()[0]
self.assertEqual(ax.get_title(), "Test")
self.assertEqual(ax.get_xlabel(), "x-axis label")
self.assertEqual(ax.get_ylabel(), "y-axis label")
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10]))
lines = ax.get_lines()
self.assertTrue(np.isnan(lines[0].get_xydata()[0][1]))
self.assertTrue(np.isnan(lines[8].get_xydata()[0][1]))
self.assertTrue(np.isnan(lines[16].get_xydata()[0][1]))
def test_boxplots_box_colors(self):
# Coloring works with all empty distributions.
fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow'])
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
# patch colors should match what we specified
self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
# patch location should include at least one nan since the distribution
# is empty, and thus hidden
for patch in ax.patches:
self.assertTrue(np.isnan(patch.xy[0][1]))
fig = boxplots([[], [], []], box_colors='pink')
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
for patch in ax.patches:
npt.assert_almost_equal(
patch.get_facecolor(),
(1.0, 0.7529411764705882, 0.796078431372549, 1.0))
self.assertTrue(np.isnan(patch.xy[0][1]))
# Coloring works with some empty distributions.
fig = boxplots([[], [1, 2, 3.5], []],
box_colors=['blue', 'red', 'yellow'])
ax = fig.get_axes()[0]
self.assertEqual(len(ax.get_xticklabels()), 3)
self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0))
self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0))
self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0))
self.assertTrue(np.isnan(ax.patches[0].xy[0][1]))
self.assertFalse(np.isnan(ax.patches[1].xy[0][1]))
self.assertTrue(np.isnan(ax.patches[2].xy[0][1]))
def test_boxplots_invalid_input(self):
# Non-numeric entries in distribution.
with npt.assert_raises(ValueError):
boxplots([[1, 'foo', 3]])
# Number of colors doesn't match number of distributions.
with npt.assert_raises(ValueError):
boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])
# Invalid legend.
with npt.assert_raises(ValueError):
boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz'))
def test_color_box_plot(self):
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])
# Some colors are None.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)])
# All colors are None.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
_color_box_plot(ax, box_plot, [None, None, None])
def test_color_box_plot_invalid_input(self):
# Invalid color.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
with npt.assert_raises(ValueError):
_color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue'])
# Wrong number of colors.
fig, ax = plt.subplots()
box_plot = plt.boxplot(self.ValidTypicalBoxData)
with npt.assert_raises(ValueError):
_color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)])
def test_is_single_matplotlib_color(self):
self.assertTrue(_is_single_matplotlib_color('w'))
self.assertTrue(_is_single_matplotlib_color('white'))
self.assertTrue(_is_single_matplotlib_color([1, 1, 1]))
self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1]))
self.assertTrue(_is_single_matplotlib_color((1, 1, 1)))
self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1)))
self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0)))
self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0)))
self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0)))
self.assertFalse(_is_single_matplotlib_color(['w', 'r']))
self.assertFalse(_is_single_matplotlib_color(['w']))
self.assertFalse(_is_single_matplotlib_color(('w',)))
self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),)))
self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),
(0.9, 0.9))))
def test_set_figure_size(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
_set_figure_size(fig, 3, 4)
self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4)))
def test_set_figure_size_defaults(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
orig_fig_size = fig.get_size_inches()
_set_figure_size(fig)
self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
def test_set_figure_size_invalid(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofoo', 'barbarbar'],
x_tick_labels_orientation='vertical')
orig_fig_size = fig.get_size_inches()
_set_figure_size(fig, -1, 0)
self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size))
def test_set_figure_size_long_labels(self):
fig, ax = plt.subplots()
_set_axes_options(ax, 'foo', 'x_foo', 'y_foo',
x_tick_labels=['foofoofooooooooooooooooooooooooo'
'oooooooooooooooooooooooooooooooo'
'oooooooooooooooooooooooooooooooo'
'oooo', 'barbarbar'],
x_tick_labels_orientation='vertical')
npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3)
npt.assert_array_equal(fig.get_size_inches(), (3, 3))
if __name__ == '__main__':
main()
| [((26694, 26700), 'unittest.main', 'main', ([], {}), '()\n', (26698, 26700), False, 'from unittest import TestCase, main\n'), ((2568, 2584), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2577, 2584), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3175), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.EmptyDeeplyNested', 'None', 'None', 'None'], {}), '(self.EmptyDeeplyNested, None, None, None)\n', (3133, 3175), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((5070, 5110), 'skbio.draw._distributions._validate_x_values', '_validate_x_values', (['[1, 2.0, 3]', 'None', '(3)'], {}), '([1, 2.0, 3], None, 3)\n', (5088, 5110), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((6660, 6674), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6672, 6674), True, 'import matplotlib.pyplot as plt\n'), ((6692, 6752), 'skbio.draw._distributions._plot_bar_data', '_plot_bar_data', (['ax', '[1, 2, 3]', '"""red"""', '(0.5)', '(3.75)', '(1.5)', '"""stdv"""'], {}), "(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv')\n", (6706, 6752), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((7080, 7094), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7092, 7094), True, 'import matplotlib.pyplot as plt\n'), ((7112, 7171), 'skbio.draw._distributions._plot_bar_data', '_plot_bar_data', (['ax', '[1, 2, 3]', '"""red"""', '(0.5)', '(3.75)', '(1.5)', '"""sem"""'], {}), "(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'sem')\n", (7126, 7171), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((7552, 7566), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7564, 7566), True, 'import matplotlib.pyplot as plt\n'), ((7742, 7756), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7754, 7756), True, 'import matplotlib.pyplot as plt\n'), ((7774, 7827), 'skbio.draw._distributions._plot_bar_data', '_plot_bar_data', (['ax', '[]', '"""red"""', '(0.5)', '(3.75)', '(1.5)', '"""stdv"""'], {}), "(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv')\n", (7788, 7827), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((7887, 7901), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7899, 7901), True, 'import 
matplotlib.pyplot as plt\n'), ((7919, 7971), 'skbio.draw._distributions._plot_bar_data', '_plot_bar_data', (['ax', '[]', '"""red"""', '(0.5)', '(3.75)', '(1.5)', '"""sem"""'], {}), "(ax, [], 'red', 0.5, 3.75, 1.5, 'sem')\n", (7933, 7971), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((8069, 8083), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8081, 8083), True, 'import matplotlib.pyplot as plt\n'), ((8101, 8161), 'skbio.draw._distributions._plot_scatter_data', '_plot_scatter_data', (['ax', '[1, 2, 3]', '"""^"""', '(0.77)', '(1)', '(1.5)', '"""stdv"""'], {}), "(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv')\n", (8119, 8161), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((8274, 8288), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8286, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8306, 8359), 'skbio.draw._distributions._plot_scatter_data', '_plot_scatter_data', (['ax', '[]', '"""^"""', '(0.77)', '(1)', '(1.5)', '"""stdv"""'], {}), "(ax, [], '^', 0.77, 1, 1.5, 'stdv')\n", (8324, 8359), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((8453, 8467), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8465, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8556), 'skbio.draw._distributions._plot_box_data', '_plot_box_data', (['ax', '[0, 0, 7, 8, -3, 44]', '"""blue"""', '(0.33)', '(55)', '(1.5)', '"""stdv"""'], {}), "(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55, 1.5, 'stdv')\n", (8499, 8556), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((9332, 9346), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9344, 9346), True, 'import matplotlib.pyplot as plt\n'), ((9364, 9417), 'skbio.draw._distributions._plot_box_data', '_plot_box_data', (['ax', '[]', '"""blue"""', '(0.33)', '(55)', '(1.5)', '"""stdv"""'], {}), "(ax, [], 'blue', 0.33, 55, 1.5, 'stdv')\n", (9378, 9417), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((9697, 9726), 'skbio.draw._distributions._calc_data_point_locations', '_calc_data_point_locations', (['(4)'], {}), '(4)\n', (9723, 9726), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, 
_get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((9735, 9781), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['locs', '[1, 2, 3, 4]'], {}), '(locs, [1, 2, 3, 4])\n', (9761, 9781), True, 'import numpy as np\n'), ((9902, 9947), 'skbio.draw._distributions._calc_data_point_locations', '_calc_data_point_locations', (['(4)', '[3, 4, 10, 12]'], {}), '(4, [3, 4, 10, 12])\n', (9928, 9947), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((10131, 10176), 'skbio.draw._distributions._calc_data_point_locations', '_calc_data_point_locations', (['(4)', '[4, 3, 12, 10]'], {}), '(4, [4, 3, 12, 10])\n', (10157, 10176), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((10358, 10410), 'skbio.draw._distributions._calc_data_point_locations', '_calc_data_point_locations', (['(3)', '[0.001, 0.2543, 0.87]'], {}), '(3, [0.001, 0.2543, 0.87])\n', (10384, 10410), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((10647, 10707), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ticks', '[1.25, 5.25, 9.25, 11.25]'], {}), '(ticks, [1.25, 5.25, 9.25, 11.25])\n', (10673, 10707), True, 'import numpy as np\n'), ((10786, 10827), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ticks', '[0.75]'], {}), '(ticks, [0.75])\n', (10812, 10827), True, 'import numpy as np\n'), ((10884, 10898), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10896, 10898), True, 'import matplotlib.pyplot as plt\n'), ((10907, 11006), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""Plot Title"""', '"""x-axis label"""', '"""y-axis label"""'], {'x_tick_labels': "['T0', 'T1']"}), "(ax, 'Plot Title', 'x-axis label', 'y-axis label',\n x_tick_labels=['T0', 'T1'])\n", (10924, 11006), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((11337, 11351), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11349, 11351), True, 'import matplotlib.pyplot as plt\n'), ((11360, 11483), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""Plot Title"""', '"""x-axis label"""', '"""y-axis label"""'], {'x_tick_labels': "['T0', 'T1', 'T2']", 'y_min': '(0)', 'y_max': '(1)'}), "(ax, 'Plot Title', 'x-axis label', 'y-axis label',\n x_tick_labels=['T0', 'T1', 'T2'], y_min=0, y_max=1)\n", (11377, 11483), False, 'from skbio.draw._distributions import 
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((11885, 11899), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11897, 11899), True, 'import matplotlib.pyplot as plt\n'), ((11908, 12003), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""Plot Title"""', '"""x-axis label"""', '"""y-axis label"""'], {'x_values': '[42, 45, 800]'}), "(ax, 'Plot Title', 'x-axis label', 'y-axis label',\n x_values=[42, 45, 800])\n", (11925, 12003), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((12407, 12421), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12419, 12421), True, 'import matplotlib.pyplot as plt\n'), ((12753, 12767), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12765, 12767), True, 'import matplotlib.pyplot as plt\n'), ((13070, 13084), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13082, 13084), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13153), 'skbio.draw._distributions._create_legend', '_create_legend', (['ax', "['b', 'r']", "['dist1', 'dist2']", '"""colors"""'], {}), "(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors')\n", (13107, 13153), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((13235, 13249), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13247, 13249), True, 'import matplotlib.pyplot as plt\n'), ((13258, 13333), 'skbio.draw._distributions._create_legend', '_create_legend', (['ax', "['^', '<', '>']", "['dist1', 'dist2', 'dist3']", '"""symbols"""'], {}), "(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols')\n", (13272, 13333), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((13486, 13500), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13498, 13500), True, 'import matplotlib.pyplot as plt\n'), ((13840, 14032), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['"""bar"""', 'self.ValidTypicalData', '[1, 4, 10, 11]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']", "['b', 'r', 'g']", '"""x-axis label"""', '"""y-axis label"""', '"""Test"""'], {}), "('bar', self.ValidTypicalData, [1, 4, 10, 11], ['T0',\n 'T1', 'T2', 'T3'], ['Infants', 'Children', 'Teens'], ['b', 'r', 'g'],\n 'x-axis label', 'y-axis label', 'Test')\n", (13861, 14032), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((14819, 14881), 'numpy.testing.assert_warns', 'npt.assert_warns', (['RuntimeWarning', 'grouped_distributions', '*args'], {}), '(RuntimeWarning, grouped_distributions, *args)\n', (14835, 14881), True, 
'import numpy.testing as npt\n'), ((14997, 15194), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['"""scatter"""', 'self.ValidTypicalData', '[1, 4, 10, 11]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']", "['^', '>', '<']", '"""x-axis label"""', '"""y-axis label"""', '"""Test"""'], {}), "('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\n 'T0', 'T1', 'T2', 'T3'], ['Infants', 'Children', 'Teens'], ['^', '>',\n '<'], 'x-axis label', 'y-axis label', 'Test')\n", (15018, 15194), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((15976, 16038), 'numpy.testing.assert_warns', 'npt.assert_warns', (['RuntimeWarning', 'grouped_distributions', '*args'], {}), '(RuntimeWarning, grouped_distributions, *args)\n', (15992, 16038), True, 'import numpy.testing as npt\n'), ((16108, 16292), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['"""scatter"""', 'self.ValidTypicalData', '[1, 4, 10, 11]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']", '[]', '"""x-axis label"""', '"""y-axis label"""', '"""Test"""'], {}), "('scatter', self.ValidTypicalData, [1, 4, 10, 11], [\n 'T0', 'T1', 'T2', 'T3'], ['Infants', 'Children', 'Teens'], [],\n 'x-axis label', 'y-axis label', 'Test')\n", (16129, 16292), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((16435, 16627), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['"""box"""', 'self.ValidTypicalData', '[1, 4, 10, 11]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']", "['b', 'g', 'y']", '"""x-axis label"""', '"""y-axis label"""', '"""Test"""'], {}), "('box', self.ValidTypicalData, [1, 4, 10, 11], ['T0',\n 'T1', 'T2', 'T3'], ['Infants', 'Children', 'Teens'], ['b', 'g', 'y'],\n 'x-axis label', 'y-axis label', 'Test')\n", (16456, 16627), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((18102, 18270), 'skbio.draw.boxplots', 'boxplots', (['self.ValidTypicalBoxData', '[1, 4, 10]', "['Data 1', 'Data 2', 'Data 3']", '"""Test"""', '"""x-axis label"""', '"""y-axis label"""'], {'legend': "(('blue', 'red'), ('foo', 'bar'))"}), "(self.ValidTypicalBoxData, [1, 4, 10], ['Data 1', 'Data 2',\n 'Data 3'], 'Test', 'x-axis label', 'y-axis label', legend=(('blue',\n 'red'), ('foo', 'bar')))\n", (18110, 18270), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((18716, 18840), 'skbio.draw.boxplots', 'boxplots', (['[[1, 2, 3], [], [4, 5, 6]]', '[1, 4, 10]', "['Data 1', 'Data 2', 'Data 3']", '"""Test"""', '"""x-axis label"""', '"""y-axis label"""'], {}), "([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10], ['Data 1', 'Data 2',\n 'Data 3'], 'Test', 'x-axis label', 'y-axis label')\n", (18724, 18840), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((19739, 19849), 'skbio.draw.boxplots', 'boxplots', (['[[], [], []]', '[1, 4, 10]', "['Data 1', 'Data 2', 'Data 3']", '"""Test"""', '"""x-axis label"""', '"""y-axis label"""'], {}), "([[], [], []], [1, 4, 10], ['Data 1', 'Data 2', 'Data 3'], 'Test',\n 'x-axis label', 'y-axis label')\n", (19747, 19849), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((20544, 20604), 'skbio.draw.boxplots', 'boxplots', (['[[], [], []]'], {'box_colors': "['blue', 'red', 'yellow']"}), "([[], [], []], box_colors=['blue', 'red', 'yellow'])\n", (20552, 20604), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((21197, 21238), 'skbio.draw.boxplots', 'boxplots', (['[[], [], []]'], {'box_colors': '"""pink"""'}), "([[], [], []], box_colors='pink')\n", (21205, 21238), 
False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((21626, 21695), 'skbio.draw.boxplots', 'boxplots', (['[[], [1, 2, 3.5], []]'], {'box_colors': "['blue', 'red', 'yellow']"}), "([[], [1, 2, 3.5], []], box_colors=['blue', 'red', 'yellow'])\n", (21634, 21695), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((22764, 22778), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22776, 22778), True, 'import matplotlib.pyplot as plt\n'), ((22798, 22835), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['self.ValidTypicalBoxData'], {}), '(self.ValidTypicalBoxData)\n', (22809, 22835), True, 'import matplotlib.pyplot as plt\n'), ((22844, 22901), 'skbio.draw._distributions._color_box_plot', '_color_box_plot', (['ax', 'box_plot', "['blue', 'w', (1, 1, 0.9)]"], {}), "(ax, box_plot, ['blue', 'w', (1, 1, 0.9)])\n", (22859, 22901), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((22953, 22967), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (22965, 22967), True, 'import matplotlib.pyplot as plt\n'), ((22987, 23024), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['self.ValidTypicalBoxData'], {}), '(self.ValidTypicalBoxData)\n', (22998, 23024), True, 'import matplotlib.pyplot as plt\n'), ((23033, 23091), 'skbio.draw._distributions._color_box_plot', '_color_box_plot', (['ax', 'box_plot', "['blue', None, (1, 1, 0.9)]"], {}), "(ax, box_plot, ['blue', None, (1, 1, 0.9)])\n", (23048, 23091), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23142, 23156), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23154, 23156), True, 'import matplotlib.pyplot as plt\n'), ((23176, 23213), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['self.ValidTypicalBoxData'], {}), '(self.ValidTypicalBoxData)\n', (23187, 23213), True, 'import matplotlib.pyplot as plt\n'), ((23222, 23271), 'skbio.draw._distributions._color_box_plot', '_color_box_plot', (['ax', 'box_plot', '[None, None, None]'], {}), '(ax, box_plot, [None, None, None])\n', (23237, 23271), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23365, 23379), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23377, 23379), True, 'import matplotlib.pyplot as plt\n'), ((23399, 23436), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['self.ValidTypicalBoxData'], {}), '(self.ValidTypicalBoxData)\n', (23410, 23436), True, 'import matplotlib.pyplot as plt\n'), ((23606, 23620), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (23618, 23620), True, 'import matplotlib.pyplot as plt\n'), ((23640, 23677), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['self.ValidTypicalBoxData'], {}), '(self.ValidTypicalBoxData)\n', (23651, 23677), True, 'import matplotlib.pyplot as plt\n'), ((24882, 24896), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (24894, 24896), True, 'import matplotlib.pyplot as plt\n'), ((24905, 25035), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""foo"""', '"""x_foo"""', '"""y_foo"""'], {'x_tick_labels': "['foofoofoo', 'barbarbar']", 'x_tick_labels_orientation': '"""vertical"""'}), "(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo',\n 'barbarbar'], x_tick_labels_orientation='vertical')\n", (24922, 25035), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((25092, 25119), 'skbio.draw._distributions._set_figure_size', '_set_figure_size', (['fig', '(3)', '(4)'], {}), '(fig, 3, 4)\n', (25108, 25119), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((25255, 25269), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (25267, 25269), True, 'import matplotlib.pyplot as plt\n'), ((25278, 25408), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""foo"""', '"""x_foo"""', '"""y_foo"""'], {'x_tick_labels': "['foofoofoo', 'barbarbar']", 'x_tick_labels_orientation': '"""vertical"""'}), "(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo',\n 'barbarbar'], x_tick_labels_orientation='vertical')\n", (25295, 25408), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((25511, 25532), 'skbio.draw._distributions._set_figure_size', '_set_figure_size', (['fig'], {}), '(fig)\n', (25527, 25532), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((25674, 25688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (25686, 25688), True, 'import matplotlib.pyplot as plt\n'), ((25697, 25827), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""foo"""', '"""x_foo"""', '"""y_foo"""'], {'x_tick_labels': "['foofoofoo', 'barbarbar']", 'x_tick_labels_orientation': '"""vertical"""'}), "(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo',\n 'barbarbar'], x_tick_labels_orientation='vertical')\n", (25714, 25827), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((25930, 25958), 'skbio.draw._distributions._set_figure_size', '_set_figure_size', (['fig', '(-1)', '(0)'], {}), '(fig, -1, 0)\n', (25946, 25958), False, 'from skbio.draw._distributions import 
_calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((26104, 26118), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (26116, 26118), True, 'import matplotlib.pyplot as plt\n'), ((26127, 26354), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""foo"""', '"""x_foo"""', '"""y_foo"""'], {'x_tick_labels': "['foofoofooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo'\n , 'barbarbar']", 'x_tick_labels_orientation': '"""vertical"""'}), "(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=[\n 'foofoofooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo'\n , 'barbarbar'], x_tick_labels_orientation='vertical')\n", (26144, 26354), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((26537, 26598), 'numpy.testing.assert_warns', 'npt.assert_warns', (['RuntimeWarning', '_set_figure_size', 'fig', '(3)', '(3)'], {}), '(RuntimeWarning, _set_figure_size, fig, 3, 3)\n', (26553, 26598), True, 'import numpy.testing as npt\n'), ((2639, 2668), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (2656, 2668), True, 'import numpy.testing as npt\n'), ((2682, 2726), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.Null', 'None', 'None', 'None'], {}), '(self.Null, None, None, None)\n', (2697, 2726), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((2782, 2811), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (2799, 2811), True, 'import numpy.testing as npt\n'), ((2825, 2870), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.Empty', 'None', 'None', 'None'], {}), '(self.Empty, None, None, None)\n', (2840, 2870), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((2933, 2962), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (2950, 2962), True, 'import numpy.testing as npt\n'), ((2976, 3027), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.EmptyNested', 'None', 'None', 'None'], {}), '(self.EmptyNested, None, None, None)\n', (2991, 3027), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((3368, 3397), 'numpy.testing.assert_raises', 
'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3385, 3397), True, 'import numpy.testing as npt\n'), ((3411, 3471), 'skbio.draw._distributions._validate_input', '_validate_input', (['[[[1, 2, 3], [4, 5]], []]', 'None', 'None', 'None'], {}), '([[[1, 2, 3], [4, 5]], []], None, None, None)\n', (3426, 3471), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((3541, 3570), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3558, 3570), True, 'import numpy.testing as npt\n'), ((3584, 3641), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.InvalidNumSamples', 'None', 'None', 'None'], {}), '(self.InvalidNumSamples, None, None, None)\n', (3599, 3641), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((3716, 3745), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3733, 3745), True, 'import numpy.testing as npt\n'), ((3759, 3828), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.ValidSingleSampleData', 'None', "['T0', 'T1']", 'None'], {}), "(self.ValidSingleSampleData, None, ['T0', 'T1'], None)\n", (3774, 3828), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((3927, 3956), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (3944, 3956), True, 'import numpy.testing as npt\n'), ((3970, 4043), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.ValidSingleSampleData', 'None', 'None', "['Men', 'Women']"], {}), "(self.ValidSingleSampleData, None, None, ['Men', 'Women'])\n", (3985, 4043), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((4149, 4265), 'skbio.draw._distributions._validate_input', '_validate_input', (['self.ValidTypicalData', '[1, 3, 4, 8]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']"], {}), "(self.ValidTypicalData, [1, 3, 4, 8], ['T0', 'T1', 'T2',\n 'T3'], ['Infants', 'Children', 'Teens'])\n", (4164, 4265), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((4447, 4476), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (4464, 4476), True, 'import numpy.testing as npt\n'), ((4681, 4710), 
'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (4698, 4710), True, 'import numpy.testing as npt\n'), ((4861, 4890), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (4878, 4890), True, 'import numpy.testing as npt\n'), ((5199, 5243), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""colors"""', 'None', '(5)'], {}), "('colors', None, 5)\n", (5224, 5243), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((5386, 5430), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""colors"""', 'None', '(4)'], {}), "('colors', None, 4)\n", (5411, 5430), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((5571, 5650), 'numpy.testing.assert_warns', 'npt.assert_warns', (['RuntimeWarning', '_get_distribution_markers', '"""colors"""', 'None', '(10)'], {}), "(RuntimeWarning, _get_distribution_markers, 'colors', None, 10)\n", (5587, 5650), True, 'import numpy.testing as npt\n'), ((5838, 5932), 'numpy.testing.assert_warns', 'npt.assert_warns', (['RuntimeWarning', '_get_distribution_markers', '"""symbols"""', "['^', '>', '<']", '(5)'], {}), "(RuntimeWarning, _get_distribution_markers, 'symbols', ['^',\n '>', '<'], 5)\n", (5854, 5932), True, 'import numpy.testing as npt\n'), ((6141, 6170), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (6158, 6170), True, 'import numpy.testing as npt\n'), ((6184, 6226), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""shapes"""', '[]', '(3)'], {}), "('shapes', [], 3)\n", (6209, 6226), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((6311, 6356), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""symbols"""', 'None', '(0)'], {}), "('symbols', None, 0)\n", (6336, 6356), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((6387, 6433), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""symbols"""', "['^']", '(0)'], {}), "('symbols', ['^'], 0)\n", (6412, 6433), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((6519, 6548), 
'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (6536, 6548), True, 'import numpy.testing as npt\n'), ((6562, 6606), 'skbio.draw._distributions._get_distribution_markers', '_get_distribution_markers', (['"""symbols"""', '[]', '(-1)'], {}), "('symbols', [], -1)\n", (6587, 6606), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((7580, 7609), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (7597, 7609), True, 'import numpy.testing as npt\n'), ((7623, 7682), 'skbio.draw._distributions._plot_bar_data', '_plot_bar_data', (['ax', '[1, 2, 3]', '"""red"""', '(0.5)', '(3.75)', '(1.5)', '"""var"""'], {}), "(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var')\n", (7637, 7682), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((9535, 9564), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (9552, 9564), True, 'import numpy.testing as npt\n'), ((9578, 9618), 'skbio.draw._distributions._calc_data_point_locations', '_calc_data_point_locations', (['(3)', '[1, 10.5]'], {}), '(3, [1, 10.5])\n', (9604, 9618), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((10024, 10064), 'numpy.array', 'np.array', (['[1, 1.33333333, 3.33333333, 4]'], {}), '([1, 1.33333333, 3.33333333, 4])\n', (10032, 10064), True, 'import numpy as np\n'), ((10253, 10293), 'numpy.array', 'np.array', (['[1.33333333, 1, 4, 3.33333333]'], {}), '([1.33333333, 1, 4, 3.33333333])\n', (10261, 10293), True, 'import numpy as np\n'), ((10487, 10515), 'numpy.array', 'np.array', (['[1, 1.58296893, 3]'], {}), '([1, 1.58296893, 3])\n', (10495, 10515), True, 'import numpy as np\n'), ((10599, 10622), 'numpy.array', 'np.array', (['[1, 5, 9, 11]'], {}), '([1, 5, 9, 11])\n', (10607, 10622), True, 'import numpy as np\n'), ((10748, 10761), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (10756, 10761), True, 'import numpy as np\n'), ((12435, 12464), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (12452, 12464), True, 'import numpy.testing as npt\n'), ((12478, 12606), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""Plot Title"""', '"""x-axis label"""', '"""y-axis label"""'], {'x_tick_labels': "['T0', 'T1', 'T2']", 'y_min': '"""car"""', 'y_max': '(30)'}), "(ax, 'Plot Title', 'x-axis label', 'y-axis label',\n x_tick_labels=['T0', 'T1', 'T2'], y_min='car', y_max=30)\n", (12495, 12606), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, 
_validate_x_values\n'), ((12781, 12810), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (12798, 12810), True, 'import numpy.testing as npt\n'), ((12824, 12960), 'skbio.draw._distributions._set_axes_options', '_set_axes_options', (['ax', '"""Plot Title"""', '"""x-axis label"""', '"""y-axis label"""'], {'x_tick_labels': "['T0', 'T1']", 'x_tick_labels_orientation': '"""brofist"""'}), "(ax, 'Plot Title', 'x-axis label', 'y-axis label',\n x_tick_labels=['T0', 'T1'], x_tick_labels_orientation='brofist')\n", (12841, 12960), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((13514, 13543), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (13531, 13543), True, 'import numpy.testing as npt\n'), ((13557, 13623), 'skbio.draw._distributions._create_legend', '_create_legend', (['ax', "['^', '<', '>']", "['dist1', 'dist2']", '"""symbols"""'], {}), "(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols')\n", (13571, 13623), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((13637, 13666), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (13654, 13666), True, 'import numpy.testing as npt\n'), ((13680, 13751), 'skbio.draw._distributions._create_legend', '_create_legend', (['ax', "['^', '<', '>']", "['dist1', 'dist2', 'dist3']", '"""foo"""'], {}), "(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'foo')\n", (13694, 13751), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((17194, 17223), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (17211, 17223), True, 'import numpy.testing as npt\n'), ((17237, 17429), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['"""pie"""', 'self.ValidTypicalData', '[1, 4, 10, 11]', "['T0', 'T1', 'T2', 'T3']", "['Infants', 'Children', 'Teens']", "['b', 'g', 'y']", '"""x-axis label"""', '"""y-axis label"""', '"""Test"""'], {}), "('pie', self.ValidTypicalData, [1, 4, 10, 11], ['T0',\n 'T1', 'T2', 'T3'], ['Infants', 'Children', 'Teens'], ['b', 'g', 'y'],\n 'x-axis label', 'y-axis label', 'Test')\n", (17258, 17429), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((17897, 17947), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['*args'], {'distribution_width': '(0)'}), '(*args, distribution_width=0)\n', (17918, 17947), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((18005, 18057), 'skbio.draw.grouped_distributions', 'grouped_distributions', (['*args'], {'distribution_width': '(-42)'}), '(*args, distribution_width=-42)\n', (18026, 18057), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((22063, 22095), 'numpy.isnan', 'np.isnan', 
(['ax.patches[0].xy[0][1]'], {}), '(ax.patches[0].xy[0][1])\n', (22071, 22095), True, 'import numpy as np\n'), ((22122, 22154), 'numpy.isnan', 'np.isnan', (['ax.patches[1].xy[0][1]'], {}), '(ax.patches[1].xy[0][1])\n', (22130, 22154), True, 'import numpy as np\n'), ((22180, 22212), 'numpy.isnan', 'np.isnan', (['ax.patches[2].xy[0][1]'], {}), '(ax.patches[2].xy[0][1])\n', (22188, 22212), True, 'import numpy as np\n'), ((22318, 22347), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (22335, 22347), True, 'import numpy.testing as npt\n'), ((22361, 22386), 'skbio.draw.boxplots', 'boxplots', (["[[1, 'foo', 3]]"], {}), "([[1, 'foo', 3]])\n", (22369, 22386), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((22467, 22496), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (22484, 22496), True, 'import numpy.testing as npt\n'), ((22510, 22574), 'skbio.draw.boxplots', 'boxplots', (['[[1, 2, 3], [], [4, 5, 6]]'], {'box_colors': "['blue', 'red']"}), "([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red'])\n", (22518, 22574), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((22615, 22644), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (22632, 22644), True, 'import numpy.testing as npt\n'), ((22658, 22709), 'skbio.draw.boxplots', 'boxplots', (['[[1, 2, 3]]'], {'legend': "('foo', 'bar', 'baz')"}), "([[1, 2, 3]], legend=('foo', 'bar', 'baz'))\n", (22666, 22709), False, 'from skbio.draw import boxplots, grouped_distributions\n'), ((23450, 23479), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (23467, 23479), True, 'import numpy.testing as npt\n'), ((23493, 23552), 'skbio.draw._distributions._color_box_plot', '_color_box_plot', (['ax', 'box_plot', "['red', 'foobarbaz', 'blue']"], {}), "(ax, box_plot, ['red', 'foobarbaz', 'blue'])\n", (23508, 23552), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23691, 23720), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError'], {}), '(ValueError)\n', (23708, 23720), True, 'import numpy.testing as npt\n'), ((23734, 23786), 'skbio.draw._distributions._color_box_plot', '_color_box_plot', (['ax', 'box_plot', "['blue', (1, 1, 0.9)]"], {}), "(ax, box_plot, ['blue', (1, 1, 0.9)])\n", (23749, 23786), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23859, 23891), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['"""w"""'], {}), "('w')\n", (23886, 23891), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23917, 23953), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', 
(['"""white"""'], {}), "('white')\n", (23944, 23953), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((23979, 24017), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (24006, 24017), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24043, 24084), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (24070, 24084), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24110, 24148), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (24137, 24148), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24174, 24215), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (24201, 24215), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24241, 24290), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['(1.0, 1.0, 1.0, 1.0)'], {}), '((1.0, 1.0, 1.0, 1.0))\n', (24268, 24290), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24316, 24358), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['(1.0, 1, 1.0)'], {}), '((1.0, 1, 1.0))\n', (24343, 24358), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24384, 24426), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['(2.0, 1, 1.0)'], {}), '((2.0, 1, 1.0))\n', (24411, 24426), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, 
_create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24454, 24493), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (["['w', 'r']"], {}), "(['w', 'r'])\n", (24481, 24493), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24520, 24554), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (["['w']"], {}), "(['w'])\n", (24547, 24554), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24581, 24616), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (["('w',)"], {}), "(('w',))\n", (24608, 24616), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24643, 24688), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['((1.0, 1.0, 1),)'], {}), '(((1.0, 1.0, 1),))\n', (24670, 24688), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((24715, 24771), 'skbio.draw._distributions._is_single_matplotlib_color', '_is_single_matplotlib_color', (['((1.0, 1.0, 1), (0.9, 0.9))'], {}), '(((1.0, 1.0, 1), (0.9, 0.9)))\n', (24742, 24771), False, 'from skbio.draw._distributions import _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values\n'), ((21156, 21180), 'numpy.isnan', 'np.isnan', (['patch.xy[0][1]'], {}), '(patch.xy[0][1])\n', (21164, 21180), True, 'import numpy as np\n'), ((21529, 21553), 'numpy.isnan', 'np.isnan', (['patch.xy[0][1]'], {}), '(patch.xy[0][1])\n', (21537, 21553), True, 'import numpy as np\n')] |
jjwatts/gigantum-client | packages/gtmapi/lmsrvcore/api/interfaces/__init__.py | 88ce0475fb6880322bdd06d987c494e29064f278 | from lmsrvcore.api.interfaces.user import User
from lmsrvcore.api.interfaces.git import GitCommit, GitRef, GitRepository
| [] |
matthieucoquet/probability | tensorflow_probability/python/bijectors/invert_test.py | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class InvertBijectorTest(tf.test.TestCase):
"""Tests the correctness of the Y = Invert(bij) transformation."""
def testBijector(self):
for fwd in [
tfb.Identity(),
tfb.Exp(),
tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
tfb.Softplus(),
tfb.SoftmaxCentered(),
]:
rev = tfb.Invert(fwd)
self.assertStartsWith(rev.name, "_".join(["invert", fwd.name]))
x = [[[1., 2.],
[2., 3.]]]
self.assertAllClose(
self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x)))
self.assertAllClose(
self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x)))
self.assertAllClose(
self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))
def testScalarCongruency(self):
bijector = tfb.Invert(tfb.Exp())
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05)
def testShapeGetters(self):
bijector = tfb.Invert(
tfb.SoftmaxCentered(validate_args=True))
x = tf.TensorShape([2])
y = tf.TensorShape([1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
tensorshape_util.as_list(y),
self.evaluate(
bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
tensorshape_util.as_list(x),
self.evaluate(
bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
def testDocstringExample(self):
exp_gamma_distribution = (
tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=1., rate=2.),
bijector=tfb.Invert(tfb.Exp())))
self.assertAllEqual(
[],
self.evaluate(
tf.shape(
exp_gamma_distribution.sample(seed=tfp_test_util.test_seed()))))
if __name__ == "__main__":
tf.test.main()
| [((3555, 3569), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (3567, 3569), True, 'import tensorflow.compat.v2 as tf\n'), ((2413, 2534), 'tensorflow_probability.python.bijectors.bijector_test_util.assert_scalar_congruency', 'bijector_test_util.assert_scalar_congruency', (['bijector'], {'lower_x': '(0.001)', 'upper_x': '(1.5)', 'eval_func': 'self.evaluate', 'rtol': '(0.05)'}), '(bijector, lower_x=0.001,\n upper_x=1.5, eval_func=self.evaluate, rtol=0.05)\n', (2456, 2534), False, 'from tensorflow_probability.python.bijectors import bijector_test_util\n'), ((2654, 2673), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[2]'], {}), '([2])\n', (2668, 2673), True, 'import tensorflow.compat.v2 as tf\n'), ((2682, 2701), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[1]'], {}), '([1])\n', (2696, 2701), True, 'import tensorflow.compat.v2 as tf\n'), ((1496, 1510), 'tensorflow_probability.python.bijectors.Identity', 'tfb.Identity', ([], {}), '()\n', (1508, 1510), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1520, 1529), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (1527, 1529), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1539, 1590), 'tensorflow_probability.python.bijectors.Affine', 'tfb.Affine', ([], {'shift': '[0.0, 1.0]', 'scale_diag': '[2.0, 3.0]'}), '(shift=[0.0, 1.0], scale_diag=[2.0, 3.0])\n', (1549, 1590), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1596, 1610), 'tensorflow_probability.python.bijectors.Softplus', 'tfb.Softplus', ([], {}), '()\n', (1608, 1610), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1620, 1641), 'tensorflow_probability.python.bijectors.SoftmaxCentered', 'tfb.SoftmaxCentered', ([], {}), '()\n', (1639, 1641), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1662, 1677), 'tensorflow_probability.python.bijectors.Invert', 'tfb.Invert', (['fwd'], {}), '(fwd)\n', (1672, 1677), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2398, 2407), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (2405, 2407), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2605, 2644), 'tensorflow_probability.python.bijectors.SoftmaxCentered', 'tfb.SoftmaxCentered', ([], {'validate_args': '(True)'}), '(validate_args=True)\n', (2624, 2644), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2795, 2822), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['y'], {}), '(y)\n', (2819, 2822), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3019, 3046), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['x'], {}), '(x)\n', (3043, 3046), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3278, 3316), 'tensorflow_probability.python.distributions.Gamma', 'tfd.Gamma', ([], {'concentration': '(1.0)', 'rate': '(2.0)'}), '(concentration=1.0, rate=2.0)\n', (3287, 3316), True, 'from tensorflow_probability.python import distributions as tfd\n'), ((2895, 2922), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['x'], {}), '(x)\n', (2919, 2922), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3119, 3146), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 
'tensorshape_util.as_list', (['y'], {}), '(y)\n', (3143, 3146), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3348, 3357), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (3355, 3357), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((3494, 3519), 'tensorflow_probability.python.internal.test_util.test_seed', 'tfp_test_util.test_seed', ([], {}), '()\n', (3517, 3519), True, 'from tensorflow_probability.python.internal import test_util as tfp_test_util\n')] |
ZSD-tim/dayu_widgets | dayu_widgets/alert.py | 31c2530bdc4161d9311574d9850c2e9471e53072 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : [email protected]
###################################################################
"""
MAlert class.
"""
import six
import functools
from dayu_widgets.avatar import MAvatar
from dayu_widgets.label import MLabel
from dayu_widgets import dayu_theme
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.mixin import property_mixin
from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property
@property_mixin
class MAlert(QWidget):
"""
Alert component for feedback.
Property:
dayu_type: The feedback type with different color container.
dayu_text: The feedback string showed in container.
"""
InfoType = 'info'
SuccessType = 'success'
WarningType = 'warning'
ErrorType = 'error'
def __init__(self, text='', parent=None, flags=Qt.Widget):
super(MAlert, self).__init__(parent, flags)
self.setAttribute(Qt.WA_StyledBackground)
self._icon_label = MAvatar()
self._icon_label.set_dayu_size(dayu_theme.tiny)
self._content_label = MLabel().secondary()
self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only()
self._close_button.clicked.connect(functools.partial(self.setVisible, False))
self._main_lay = QHBoxLayout()
self._main_lay.setContentsMargins(8, 8, 8, 8)
self._main_lay.addWidget(self._icon_label)
self._main_lay.addWidget(self._content_label)
self._main_lay.addStretch()
self._main_lay.addWidget(self._close_button)
self.setLayout(self._main_lay)
self.set_show_icon(True)
self.set_closeable(False)
self._dayu_type = None
self._dayu_text = None
self.set_dayu_type(MAlert.InfoType)
self.set_dayu_text(text)
def set_closeable(self, closeable):
"""Display the close icon button or not."""
self._close_button.setVisible(closeable)
def set_show_icon(self, show_icon):
"""Display the information type icon or not."""
self._icon_label.setVisible(show_icon)
def _set_dayu_text(self):
self._content_label.setText(self._dayu_text)
self.setVisible(bool(self._dayu_text))
def set_dayu_text(self, value):
"""Set the feedback content."""
if isinstance(value, six.string_types):
self._dayu_text = value
else:
raise TypeError("Input argument 'value' should be string type, "
"but get {}".format(type(value)))
self._set_dayu_text()
def _set_dayu_type(self):
self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type),
vars(dayu_theme).get(self._dayu_type + '_color')))
self.style().polish(self)
def set_dayu_type(self, value):
"""Set feedback type."""
if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]:
self._dayu_type = value
else:
raise ValueError("Input argument 'value' should be one of "
"info/success/warning/error string.")
self._set_dayu_type()
def get_dayu_type(self):
"""
Get MAlert feedback type.
:return: str
"""
return self._dayu_type
def get_dayu_text(self):
"""
Get MAlert feedback message.
:return: six.string_types
"""
return self._dayu_text
dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text)
dayu_type = Property(str, get_dayu_type, set_dayu_type)
def info(self):
"""Set MAlert to InfoType"""
self.set_dayu_type(MAlert.InfoType)
return self
def success(self):
"""Set MAlert to SuccessType"""
self.set_dayu_type(MAlert.SuccessType)
return self
def warning(self):
"""Set MAlert to WarningType"""
self.set_dayu_type(MAlert.WarningType)
return self
def error(self):
"""Set MAlert to ErrorType"""
self.set_dayu_type(MAlert.ErrorType)
return self
def closable(self):
"""Set MAlert closebale is True"""
self.set_closeable(True)
return self
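if __name__ == '__main__':
    # Minimal usage sketch of the chainable MAlert helpers defined above.
    # Assumption: a PySide2 backend is installed for dayu_widgets; the
    # QApplication import below is illustrative and not taken from this module.
    from PySide2.QtWidgets import QApplication
    app = QApplication([])
    alert = MAlert(text='Connection lost').error().closable()
    alert.show()
    app.exec_()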
| [((3649, 3702), 'dayu_widgets.qt.Property', 'Property', (['six.text_type', 'get_dayu_text', 'set_dayu_text'], {}), '(six.text_type, get_dayu_text, set_dayu_text)\n', (3657, 3702), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((3719, 3762), 'dayu_widgets.qt.Property', 'Property', (['str', 'get_dayu_type', 'set_dayu_type'], {}), '(str, get_dayu_type, set_dayu_type)\n', (3727, 3762), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((1116, 1125), 'dayu_widgets.avatar.MAvatar', 'MAvatar', ([], {}), '()\n', (1123, 1125), False, 'from dayu_widgets.avatar import MAvatar\n'), ((1429, 1442), 'dayu_widgets.qt.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1440, 1442), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((1360, 1401), 'functools.partial', 'functools.partial', (['self.setVisible', '(False)'], {}), '(self.setVisible, False)\n', (1377, 1401), False, 'import functools\n'), ((1212, 1220), 'dayu_widgets.label.MLabel', 'MLabel', ([], {}), '()\n', (1218, 1220), False, 'from dayu_widgets.label import MLabel\n'), ((1262, 1275), 'dayu_widgets.tool_button.MToolButton', 'MToolButton', ([], {}), '()\n', (1273, 1275), False, 'from dayu_widgets.tool_button import MToolButton\n')] |
byeongal/KMUCP | week03/code05.py | 5bafe02c40aae67fc53d9e6cdcb727929368587e | input_str = input("문자열을 입력해 주세요. >> ")
print("입력받은 문자열의 길이는", len(input_str), "입니다.")
| [] |
bibinvasudev/EBI_Project | jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py | df2560139e463d68a37e67e0bb683c06fa9ef91b | # SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py
#**************************************************************************************************************
#
# Created by : bibin
# Version : 1.0
#
# Description :
# 1. This script will load the data into 'SALES_HIERARCHY' table based on stream lookups.
#
#
# Initial Creation:
#
# Date (YYYY-MM-DD) Change Description
# ----------------- ------------------
# 2018-11-02 Initial creation
#
#**************************************************************************************************************
# Importing required Lib
from dependencies.spark import start_spark
from dependencies.EbiReadWrite import EbiReadWrite
import logging
import sys
from time import gmtime, strftime
import cx_Oracle
import py4j
import pyspark
# Spark logging
logger = logging.getLogger(__name__)
# Date Formats
start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
log_date =strftime("%Y%m%d", gmtime())
# Job Naming Details
script_name = "SCH1101.SH"
app_name = "JB_SALES_HIERARCHY_FLAG_N_SR"
log_filename = app_name + '_' + log_date + '.log'
# Query for loading the SALES_HIERARCHY table
def query_data(db_schema):
query = """INSERT INTO """+ db_schema +""".SALES_HIERARCHY
(SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID,
SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION,
GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE)
SELECT
B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY,
B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA,
B.AREA_DESCRIPTION AS SALES_AREA,
B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION,
SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION,
SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT,
SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM,
A.EMPLOYEE_ID,
A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER,
SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID,
SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME,
A.ORGANIZATION_NAME AS SALES_REP_ORG,
A.COMP_PLAN_TYPE_CODE,
A.COMP_PLAN_TITLE,
A.COMP_PLAN_CATEGORY_CODE,
A.COMP_PLAN_DESCRIPTION,
NULL AS GOAL_CURR_CODE ,
A.START_DATE,
A.END_DATE,
A.STATUS_CODE,
A.PARTICIPANT_LEVEL_CODE,
SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE,
A.CURRENT_RECORD_FLAG,
C.RECENT_HIRE_DATE AS LAST_HIRE_DATE
FROM
(
SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK
FROM DIMS.SALES_PARTICIPANT a
WHERE
BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y')
AND PARTICIPANT_LEVEL_CODE = 'SR'
ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY
) A
INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY
LEFT OUTER JOIN
(SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C
ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID
WHERE RANK = 1"""
return query
# Main method
def main():
try:
src_count = '0'
dest_count = '0'
# start Spark application and get Spark session, logger and config
spark, config = start_spark(
app_name=app_name)
# Create class Object
Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger)
# DB prop Key of Source DB
db_prop_key_load = config['DB_PROP_KEY_LOAD']
db_prop_key_extract = config['DB_PROP_KEY_EXTRACT']
db_schema = config['DB_SCHEMA']
log_file = config['LOG_DIR_NAME'] + "/" + log_filename
#SQL Query
query = query_data(db_schema)
# Calling Job Class method --> get_target_data_update()
Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load)
end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("Success")
Ebi_read_write_obj.job_debugger_print(" \n __main__ " + app_name +" --> Job "+app_name+" Succeed \n")
except Exception as err:
# Write expeption in spark log or console
end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("[Error] Failed")
Ebi_read_write_obj.job_debugger_print(" \n Job "+app_name+" Failed\n")
logger.error("\n __main__ "+ app_name +" --> Exception-Traceback :: " + str(err))
raise
# Entry point for script
if __name__ == "__main__":
# Calling main() method
main()
| [((874, 901), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (891, 901), False, 'import logging\n'), ((1008, 1016), 'time.gmtime', 'gmtime', ([], {}), '()\n', (1014, 1016), False, 'from time import gmtime, strftime\n'), ((3717, 3747), 'dependencies.spark.start_spark', 'start_spark', ([], {'app_name': 'app_name'}), '(app_name=app_name)\n', (3728, 3747), False, 'from dependencies.spark import start_spark\n'), ((3833, 3878), 'dependencies.EbiReadWrite.EbiReadWrite', 'EbiReadWrite', (['app_name', 'spark', 'config', 'logger'], {}), '(app_name, spark, config, logger)\n', (3845, 3878), False, 'from dependencies.EbiReadWrite import EbiReadWrite\n'), ((965, 973), 'time.gmtime', 'gmtime', ([], {}), '()\n', (971, 973), False, 'from time import gmtime, strftime\n'), ((4421, 4429), 'time.gmtime', 'gmtime', ([], {}), '()\n', (4427, 4429), False, 'from time import gmtime, strftime\n'), ((5025, 5033), 'time.gmtime', 'gmtime', ([], {}), '()\n', (5031, 5033), False, 'from time import gmtime, strftime\n')] |
amanbhandari2002/mythproto | myth/util.py | b03764485dad5178127307a3b3e4ddc508158143 | def decodeLongLong(lst):
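    # Reassemble a 64-bit integer from a (high, low) pair of signed 32-bit words.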
high = int(lst[0]) << 32
low = int(lst[1])
if low < 0:
low += 4294967296
if high < 0:
high += 4294967296
return high + low
def encodeLongLong(i):
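    # Split a non-negative 64-bit integer into (high, low) 32-bit words (inverse of decodeLongLong).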
    high = i // 4294967296
    low = i - (high * 4294967296)
return high, low
def parseOk(s):
    if s == 'ok':
        return True
    else:
        return False
def printList(lst):
#for i in range(len(lst)):
# print i, '\t', repr(lst[i])
pass
# t is a nine item tuple returned by the time module. This method converts it to
# MythTV's standard representation used on filenames
def encodeTime(t):
ret = ''
for i in t[:-3]:
si = str(i)
if len(si) < 2:
ret += si.zfill(2)
else:
ret += si
return ret
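# Example: encodeTime(time.localtime()) yields e.g. '20240131093000' (YYYYMMDDHHMMSS).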
| [] |
openem-team/openem | scripts/tator_tracker.py | 45222c9c77084eacab278da25a8734ae7d43f677 | #!/usr/bin/env python3
import argparse
import openem
import os
import cv2
import numpy as np
from openem.tracking import *
import json
import sys
import datetime
import tator
from pprint import pprint
from collections import defaultdict
import yaml
import math
import subprocess
def crop_localization(frame_bgr, localization):
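    # Convert the normalized (x, y, width, height) box to pixel coordinates and return that crop of the frame.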
img_width = frame_bgr.shape[1]
img_height = frame_bgr.shape[0]
box_x = round(localization['x'] * img_width)
box_y = round(localization['y'] * img_height)
box_width = round(localization['width'] * img_width)
box_height = round(localization['height'] * img_height)
img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:]
return img_crop
def join_up_iteration(detections, track_ids):
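    # Group detections into tracklets keyed by their assigned track id.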
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def extend_tracklets(tracklets, length):
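    # Extend the first and last detections of each sufficiently long tracklet along its estimated velocity.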
for track_id,track in tracklets.items():
if len(track) <= 16:
continue
ext_length = min(length,len(track))
sum_h=0.0
sum_w=0.0
track.sort(key=lambda x:x['frame'])
def restore_det(det):
det['x'] = det.get('orig_x',det['x'])
det['y'] = det.get('orig_y',det['y'])
det['width'] = det.get('orig_w',det['width'])
det['height'] = det.get('orig_h',det['height'])
det['orig_x'] = det['x']
det['orig_y'] = det['y']
det['orig_w'] = det['width']
det['orig_h'] = det['height']
restore_det(track[0])
restore_det(track[-1])
for d in track:
sum_h += d['height']
sum_w += d['width']
angle,vel,comps = track_vel(track)
vel_x = comps[0]
vel_y = comps[1]
avg_h = sum_h / len(track)
avg_w = sum_w / len(track)
new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length)))
new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length)))
old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length)))
old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length)))
min_x = min(track[-1]['x'],new_x)
min_y = min(track[-1]['y'],new_y)
if min_x > 0 and min_y > 0:
track[-1]['x'] = min_x
track[-1]['y'] = min_y
track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1)
track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1)
else:
track[-1]['width'] = 0
track[-1]['height'] = 0
min_x = min(track[0]['x'],old_x)
min_y = min(track[0]['y'],old_y)
if min_x > 0 and min_y > 0:
track[0]['x'] = min(max(0,min_x),1)
track[0]['y'] = min(max(0,min_y),1)
track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1)
track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1)
else:
track[0]['width'] = 0
track[0]['height'] = 0
return tracklets
def split_tracklets(tracklets):
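    # Flatten tracklets back into parallel detection and track-id lists.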
track_ids=[]
detections=[]
for track_id,track in tracklets.items():
for d in track:
track_ids.append(track_id)
detections.append(d)
return detections,track_ids
def trim_tracklets(detections, track_ids, max_length):
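    # Split tracklets longer than max_length into consecutive chunks and renumber the track ids.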
tracklets = join_up_iteration(detections, track_ids)
next_track_id = 1
new_tracklets = {}
for track_id,detections in tracklets.items():
new_track_count=math.ceil(len(detections)/max_length)
for i in range(new_track_count):
start=max_length*i
end=max_length+(max_length*i)
new_tracklets[next_track_id] = detections[start:end]
next_track_id += 1
detections, track_ids = split_tracklets(new_tracklets)
track_ids = renumber_track_ids(track_ids)
return detections, track_ids
if __name__=="__main__":
parser = argparse.ArgumentParser(description=__doc__)
tator.get_parser(parser)
parser.add_argument("--detection-type-id", type=int, required=True)
parser.add_argument("--tracklet-type-id", type=int, required=True)
parser.add_argument("--version-id", type=int)
parser.add_argument("--input-version-id", type=int)
parser.add_argument("--strategy-config", type=str)
parser.add_argument("--dry-run", action='store_true')
parser.add_argument('media_files', type=str, nargs='*')
args = parser.parse_args()
# Weight methods
methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion']
# Weight methods that require the video
visual_methods = ['hybrid', 'iou-global-motion']
api = tator.get_api(args.host, args.token)
detection_type = api.get_localization_type(args.detection_type_id)
project = detection_type.project
version_id = args.version_id
default_strategy = {"method": "hybrid",
"frame-diffs": [1,2,4,8,16,32,64,128,256],
"args": {},
"extension": {'method' : None},
"max-length": {},
"min-length": 0}
if args.strategy_config:
strategy = {**default_strategy}
with open(args.strategy_config, "r") as strategy_file:
strategy.update(yaml.load(strategy_file))
else:
strategy = default_strategy
if strategy['method'] == 'hybrid':
model_file = strategy['args']['model_file']
batch_size = strategy['args'].get('batch_size', 4)
comparator=FeaturesComparator(model_file)
#extractor=FeaturesExtractor(args.model_file)
class_method = strategy.get('class-method',None)
classify_function = None
classify_args = {}
if class_method:
pip_package=class_method.get('pip',None)
if pip_package:
p = subprocess.run([sys.executable,
"-m",
"pip",
"install",
pip_package])
print("Finished process.", flush=True)
function_name = class_method.get('function',None)
classify_args = class_method.get('args',None)
names = function_name.split('.')
module = __import__(names[0])
for name in names[1:-1]:
module = getattr(module,name)
classify_function = getattr(module,names[-1])
print("Strategy: ", flush=True)
pprint(strategy)
print(args.media_files, flush=True)
optional_fetch_args = {}
if args.input_version_id:
optional_fetch_args['version'] = [args.input_version_id]
for media_file in args.media_files:
comps=os.path.splitext(os.path.basename(media_file))[0]
media_id=comps.split('_')[0]
media = api.get_media(media_id)
if media.attributes.get("Tracklet Generator Processed") != "No":
print(f"Skipping media ID {media.id}, name {media.name} due to "
f"'Tracklet Generator Processed' attribute being set to "
f"something other than 'No'!")
continue
media_shape = (media.height, media.width)
fps = media.fps
localizations_by_frame = {}
localizations = api.get_localization_list(project,
type=args.detection_type_id,
media_id=[media_id],
**optional_fetch_args)
localizations = [l.to_dict() for l in localizations]
if len(localizations) == 0:
print(f"No localizations present in media {media_file}", flush=True)
continue
print(f"Processing {len(localizations)} detections", flush=True)
# Group by localizations by frame
for lid, local in enumerate(localizations):
frame = local['frame']
if frame in localizations_by_frame:
localizations_by_frame[frame].append(local)
else:
localizations_by_frame[frame] = [local]
detections=[]
track_ids=[]
track_id=1
# If media does not exist, download it.
if strategy['method'] == 'iou-global-motion':
if not os.path.exists(media_file):
temp_path = f'/tmp/{os.path.basename(media_file)}'
for progress in tator.util.download_media(api, media, temp_path):
print(f"Downloading {media_file}, {progress}%...")
print("Download finished!")
# Unfrag the file
subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file])
os.remove(temp_path)
if strategy['method'] == 'hybrid': # Not all visual methods need detection images
vid=cv2.VideoCapture(media_file)
ok=True
frame = 0
while ok:
ok,frame_bgr = vid.read()
if frame in localizations_by_frame:
for l in localizations_by_frame[frame]:
l['bgr'] = crop_localization(frame_bgr, l)
if l['attributes']['Confidence'] < 0.50:
continue
detections.append(l)
track_ids.append(track_id)
track_id += 1
frame+=1
else:
# The method is analytical on the detections coordinates
# and does not require processing the video
for frame,frame_detections in localizations_by_frame.items():
for det in frame_detections:
detections.append(det)
track_ids.append(track_id)
track_id += 1
print("Loaded all detections", flush=True)
track_ids = renumber_track_ids(track_ids)
if strategy['method'] == 'hybrid':
weights_strategy = HybridWeights(comparator,
None,
None,
media_shape,
fps,
0.0,
batch_size)
elif strategy['method'] == 'iou':
weights_strategy = IoUWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-motion':
weights_strategy = IoUMotionWeights(media_shape, **strategy['args'])
elif strategy['method'] == 'iou-global-motion':
weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args'])
# Generate localization bgr based on grouped localizations
for x in strategy['frame-diffs']:
print(f"Started {x}", flush=True)
detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets(
detections,
track_ids,
x,
weights_strategy)
if x in strategy['max-length']:
trim_to = strategy['max-length'][x]
print(f"Trimming track to max length of {trim_to}")
detections, track_ids = trim_tracklets(detections, track_ids, trim_to)
_,det_counts_per_track=np.unique(track_ids,return_counts=True)
print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True)
if x > 1 and strategy['extension']['method'] == 'linear-motion':
ext_frames=x
print(f"Extending by linear motion, {ext_frames}")
tracklets = join_up_iteration(detections,track_ids)
tracklets = extend_tracklets(tracklets, ext_frames)
detections, track_ids = split_tracklets(tracklets)
# Now we make new track objects based on the result
# from the graph solver
# [ detection, detection, detection, ...]
# [ track#, track#, track#,...]
# [ 133, 33, 13, 133,]
# [ 0,0,1,1]
# TODO: Handle is_cut?
def join_up_final(detections, track_ids):
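            # Regroup the solver's detections into tracklets keyed by track id.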
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d,tid in zip(detections, track_ids):
tracklets[tid].append(d)
return tracklets
def make_object(track):
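            # Build a tracklet state spec from a track, or return None when it fails classification or the minimum-length check.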
track.sort(key=lambda x:x['frame'])
if classify_function:
valid,attrs = classify_function(media.to_dict(),
track,
**classify_args)
elif len(track) >= strategy['min-length']:
valid = True
attrs = {}
else:
valid = False
attrs = {}
if valid:
obj={"type": args.tracklet_type_id,
"media_ids": [int(media_id)],
"localization_ids": [x['id'] for x in track],
**attrs,
"version": version_id}
return obj
else:
return None
tracklets = join_up_final(detections, track_ids)
new_objs=[make_object(tracklet) for tracklet in tracklets.values()]
new_objs=[x for x in new_objs if x is not None]
print(f"New objects = {len(new_objs)}")
with open(f"/work/{media_id}.json", "w") as f:
json.dump(new_objs,f)
if not args.dry_run:
for response in tator.util.chunked_create(api.create_state_list,project,
state_spec=new_objs):
pass
try:
api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}})
except:
print("WARNING: Unable to set 'Tracklet Generator Processed' attribute")
| [((793, 810), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (804, 810), False, 'from collections import defaultdict\n'), ((4086, 4130), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (4109, 4130), False, 'import argparse\n'), ((4135, 4159), 'tator.get_parser', 'tator.get_parser', (['parser'], {}), '(parser)\n', (4151, 4159), False, 'import tator\n'), ((4811, 4847), 'tator.get_api', 'tator.get_api', (['args.host', 'args.token'], {}), '(args.host, args.token)\n', (4824, 4847), False, 'import tator\n'), ((6593, 6609), 'pprint.pprint', 'pprint', (['strategy'], {}), '(strategy)\n', (6599, 6609), False, 'from pprint import pprint\n'), ((839, 856), 'numpy.max', 'np.max', (['track_ids'], {}), '(track_ids)\n', (845, 856), True, 'import numpy as np\n'), ((5982, 6051), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'pip', 'install', pip_package]"], {}), "([sys.executable, '-m', 'pip', 'install', pip_package])\n", (5996, 6051), False, 'import subprocess\n'), ((8972, 9000), 'cv2.VideoCapture', 'cv2.VideoCapture', (['media_file'], {}), '(media_file)\n', (8988, 9000), False, 'import cv2\n'), ((11491, 11531), 'numpy.unique', 'np.unique', (['track_ids'], {'return_counts': '(True)'}), '(track_ids, return_counts=True)\n', (11500, 11531), True, 'import numpy as np\n'), ((12347, 12364), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12358, 12364), False, 'from collections import defaultdict\n'), ((13722, 13744), 'json.dump', 'json.dump', (['new_objs', 'f'], {}), '(new_objs, f)\n', (13731, 13744), False, 'import json\n'), ((13801, 13879), 'tator.util.chunked_create', 'tator.util.chunked_create', (['api.create_state_list', 'project'], {'state_spec': 'new_objs'}), '(api.create_state_list, project, state_spec=new_objs)\n', (13826, 13879), False, 'import tator\n'), ((5438, 5462), 'yaml.load', 'yaml.load', (['strategy_file'], {}), '(strategy_file)\n', (5447, 5462), False, 'import yaml\n'), ((6846, 6874), 'os.path.basename', 'os.path.basename', (['media_file'], {}), '(media_file)\n', (6862, 6874), False, 'import os\n'), ((8414, 8440), 'os.path.exists', 'os.path.exists', (['media_file'], {}), '(media_file)\n', (8428, 8440), False, 'import os\n'), ((8541, 8589), 'tator.util.download_media', 'tator.util.download_media', (['api', 'media', 'temp_path'], {}), '(api, media, temp_path)\n', (8566, 8589), False, 'import tator\n'), ((8756, 8827), 'subprocess.run', 'subprocess.run', (["['ffmpeg', '-i', temp_path, '-c:v', 'copy', media_file]"], {}), "(['ffmpeg', '-i', temp_path, '-c:v', 'copy', media_file])\n", (8770, 8827), False, 'import subprocess\n'), ((8844, 8864), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (8853, 8864), False, 'import os\n'), ((12393, 12410), 'numpy.max', 'np.max', (['track_ids'], {}), '(track_ids)\n', (12399, 12410), True, 'import numpy as np\n'), ((8478, 8506), 'os.path.basename', 'os.path.basename', (['media_file'], {}), '(media_file)\n', (8494, 8506), False, 'import os\n'), ((14071, 14094), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14092, 14094), False, 'import datetime\n')] |
Darkar25/HyperGAN | hypergan/losses/multi_loss.py | 76ef7e0c20569ceece88dc76396d92c77050692b | import tensorflow as tf
import numpy as np
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent
TINY=1e-8
class MultiLoss(BaseLoss):
"""Takes multiple distributions and does an additional approximator"""
def _create(self, d_real, d_fake):
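        # Build one loss per child discriminator from the split batch, then combine them with MultiComponent.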
gan = self.gan
config = self.config
losses = []
split = self.split
for d in gan.discriminator.children:
if config.swapped:
d_swap = d_real
d_real = d_fake
d_fake = d_swap
ds = self.split_batch(d.sample, split)
d_real = ds[0]
d_fake = tf.add_n(ds[1:])/(len(ds)-1)
loss_object = self.config['loss_class'](gan, self.config, d_real=d_real, d_fake=d_fake)
losses.append(loss_object)
#relational layer?
combine = MultiComponent(combine='concat', components=losses)
g_loss = combine.g_loss_features
d_loss = combine.d_loss_features
self.d_loss = d_loss
self.g_loss = g_loss
self.losses = losses
return [d_loss, g_loss]
| [((908, 959), 'hypergan.multi_component.MultiComponent', 'MultiComponent', ([], {'combine': '"""concat"""', 'components': 'losses'}), "(combine='concat', components=losses)\n", (922, 959), False, 'from hypergan.multi_component import MultiComponent\n'), ((692, 708), 'tensorflow.add_n', 'tf.add_n', (['ds[1:]'], {}), '(ds[1:])\n', (700, 708), True, 'import tensorflow as tf\n')] |
mohan-pogala/fidesops | src/fidesops/api/v1/endpoints/policy_endpoints.py | 5c686362d4fb3b85253dd7e2898be1131a5071ab | import logging
from typing import Any, Dict, List
from fastapi import APIRouter, Body, Depends, Security
from fastapi_pagination import (
Page,
Params,
)
from fastapi_pagination.bases import AbstractPage
from fastapi_pagination.ext.sqlalchemy import paginate
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import conlist
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.exceptions import HTTPException
from starlette.status import HTTP_404_NOT_FOUND
from fidesops.api import deps
from fidesops.api.v1 import scope_registry as scopes
from fidesops.api.v1 import urn_registry as urls
from fidesops.common_exceptions import (
DataCategoryNotSupported,
PolicyValidationError,
RuleValidationError,
RuleTargetValidationError,
KeyOrNameAlreadyExists,
)
from fidesops.models.client import ClientDetail
from fidesops.models.policy import (
ActionType,
Policy,
Rule,
RuleTarget,
)
from fidesops.models.storage import StorageConfig
from fidesops.schemas import policy as schemas
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.util.oauth_util import verify_oauth_client
router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX)
logger = logging.getLogger(__name__)
@router.get(
urls.POLICY_LIST,
status_code=200,
response_model=Page[schemas.PolicyResponse],
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy_list(
*,
db: Session = Depends(deps.get_db),
params: Params = Depends(),
) -> AbstractPage[Policy]:
"""
Return a paginated list of all Policy records in this system
"""
logger.info(f"Finding all policies with pagination params '{params}'")
policies = Policy.query(db=db)
return paginate(policies, params=params)
def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:
"""Helper method to load Policy or throw a 404"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = Policy.get_by(db=db, field="key", value=policy_key)
if not policy:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Policy found for key {policy_key}.",
)
return policy
@router.get(
urls.POLICY_DETAIL,
status_code=200,
response_model=schemas.PolicyResponse,
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy(
*,
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> schemas.PolicyResponse:
"""
Return a single Policy
"""
return get_policy_or_error(db, policy_key)
@router.patch(
urls.POLICY_LIST,
status_code=200,
response_model=schemas.BulkPutPolicyResponse,
)
def create_or_update_policies(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.POLICY_CREATE_OR_UPDATE],
),
db: Session = Depends(deps.get_db),
data: conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutPolicyResponse:
"""
Given a list of policy data elements, create or update corresponding Policy objects
or report failure
"""
created_or_updated: List[Policy] = []
failed: List[BulkUpdateFailed] = []
logger.info(f"Starting bulk upsert for {len(data)} policies")
for policy_schema in data:
policy_data: Dict[str, Any] = dict(policy_schema)
try:
policy = Policy.create_or_update(
db=db,
data={
"name": policy_data["name"],
"key": policy_data.get("key"),
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": exc.args[0],
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
except PolicyValidationError as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": "This record could not be added because the data provided was invalid.",
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(policy)
return schemas.BulkPutPolicyResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.patch(
urls.RULE_LIST,
status_code=200,
response_model=schemas.BulkPutRuleResponse,
)
def create_or_update_rules(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.RULE_CREATE_OR_UPDATE],
),
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleResponse:
"""
Given a list of Rule data elements, create or update corresponding Rule objects
or report failure
"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = get_policy_or_error(db, policy_key)
created_or_updated: List[Rule] = []
failed: List[BulkUpdateFailed] = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}"
)
for schema in input_data:
# Validate all FKs in the input data exist
associated_storage_config_id = None
if schema.action_type == ActionType.access.value:
# Only validate the associated StorageConfig on access rules
storage_destination_key = schema.storage_destination_key
associated_storage_config: StorageConfig = StorageConfig.get_by(
db=db,
field="key",
value=storage_destination_key,
)
if not associated_storage_config:
logger.warning(
f"No storage config found with key {storage_destination_key}"
)
failure = {
"message": f"A StorageConfig with key {storage_destination_key} does not exist",
"data": dict(
schema
), # Be sure to pass the schema out the same way it came in
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
associated_storage_config_id = associated_storage_config.id
masking_strategy_data = None
if schema.masking_strategy:
masking_strategy_data = schema.masking_strategy.dict()
try:
rule = Rule.create_or_update(
db=db,
data={
"action_type": schema.action_type,
"client_id": client.id,
"key": schema.key,
"name": schema.name,
"policy_id": policy.id,
"storage_destination_id": associated_storage_config_id,
"masking_strategy": masking_strategy_data,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except RuleValidationError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except ValueError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(rule)
return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed)
@router.delete(
urls.RULE_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete a policy rule.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule with key '{rule_key}'")
rule.delete(db=db)
@router.patch(
urls.RULE_TARGET_LIST,
status_code=200,
response_model=schemas.BulkPutRuleTargetResponse,
)
def create_or_update_rule_targets(
*,
client: ClientDetail = Security(
verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE]
),
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleTargetResponse:
"""
Given a list of Rule data elements, create corresponding Rule objects
or report failure
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
created_or_updated = []
failed = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}"
)
for schema in input_data:
try:
target = RuleTarget.create_or_update(
db=db,
data={
"name": schema.name,
"key": schema.key,
"data_category": schema.data_category,
"rule_id": rule.id,
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except (
DataCategoryNotSupported,
PolicyValidationError,
RuleTargetValidationError,
) as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except IntegrityError as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": f"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}",
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
else:
created_or_updated.append(target)
return schemas.BulkPutRuleTargetResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.delete(
urls.RULE_TARGET_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule_target(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
rule_target_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete the rule target.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Finding rule target with key '{rule_target_key}'")
target = RuleTarget.filter(
db=db,
conditions=(
RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id
),
).first()
if not target:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule target with key '{rule_target_key}'")
target.delete(db=db)
| [((1199, 1252), 'fastapi.APIRouter', 'APIRouter', ([], {'tags': "['Policy']", 'prefix': 'urls.V1_URL_PREFIX'}), "(tags=['Policy'], prefix=urls.V1_URL_PREFIX)\n", (1208, 1252), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1263, 1290), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1280, 1290), False, 'import logging\n'), ((1525, 1545), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (1532, 1545), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1568, 1577), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (1575, 1577), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1777, 1796), 'fidesops.models.policy.Policy.query', 'Policy.query', ([], {'db': 'db'}), '(db=db)\n', (1789, 1796), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((1808, 1841), 'fastapi_pagination.ext.sqlalchemy.paginate', 'paginate', (['policies'], {'params': 'params'}), '(policies, params=params)\n', (1816, 1841), False, 'from fastapi_pagination.ext.sqlalchemy import paginate\n'), ((2043, 2094), 'fidesops.models.policy.Policy.get_by', 'Policy.get_by', ([], {'db': 'db', 'field': '"""key"""', 'value': 'policy_key'}), "(db=db, field='key', value=policy_key)\n", (2056, 2094), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((2531, 2551), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (2538, 2551), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2849, 2919), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.POLICY_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE])\n', (2857, 2919), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2962, 2982), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (2969, 2982), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((3034, 3043), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (3038, 3043), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4490, 4564), 'fidesops.schemas.policy.BulkPutPolicyResponse', 'schemas.BulkPutPolicyResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (4519, 4564), True, 'from fidesops.schemas import policy as schemas\n'), ((4758, 4826), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE])\n', (4766, 4826), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4898, 4918), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (4905, 4918), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4980, 4989), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (4984, 4989), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((8440, 8512), 'fidesops.schemas.policy.BulkPutRuleResponse', 'schemas.BulkPutRuleResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (8467, 8512), True, 'from fidesops.schemas import policy as schemas\n'), ((8753, 8773), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (8760, 8773), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9503, 9571), 'fastapi.Security', 'Security', (['verify_oauth_client'], 
{'scopes': '[scopes.RULE_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE])\n', (9511, 9571), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9661, 9681), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (9668, 9681), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9743, 9752), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (9747, 9752), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((12218, 12296), 'fidesops.schemas.policy.BulkPutRuleTargetResponse', 'schemas.BulkPutRuleTargetResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (12251, 12296), True, 'from fidesops.schemas import policy as schemas\n'), ((12608, 12628), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (12615, 12628), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2128, 2227), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Policy found for key {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Policy found for key {policy_key}.')\n", (2141, 2227), False, 'from starlette.exceptions import HTTPException\n'), ((2994, 3031), 'pydantic.conlist', 'conlist', (['schemas.Policy'], {'max_items': '(50)'}), '(schemas.Policy, max_items=50)\n', (3001, 3031), False, 'from pydantic import conlist\n'), ((4936, 4977), 'pydantic.conlist', 'conlist', (['schemas.RuleCreate'], {'max_items': '(50)'}), '(schemas.RuleCreate, max_items=50)\n', (4943, 4977), False, 'from pydantic import conlist\n'), ((9084, 9202), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (9097, 9202), False, 'from starlette.exceptions import HTTPException\n'), ((9699, 9740), 'pydantic.conlist', 'conlist', (['schemas.RuleTarget'], {'max_items': '(50)'}), '(schemas.RuleTarget, max_items=50)\n', (9706, 9740), False, 'from pydantic import conlist\n'), ((10177, 10295), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (10190, 10295), False, 'from starlette.exceptions import HTTPException\n'), ((12940, 13058), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (12953, 13058), False, 'from starlette.exceptions import HTTPException\n'), ((13365, 13520), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.'\n )\n", (13378, 13520), False, 'from starlette.exceptions import HTTPException\n'), ((1416, 1474), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': 
'[scopes.POLICY_READ]'}), '(verify_oauth_client, scopes=[scopes.POLICY_READ])\n', (1424, 1474), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2398, 2456), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.POLICY_READ]'}), '(verify_oauth_client, scopes=[scopes.POLICY_READ])\n', (2406, 2456), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((5841, 5912), 'fidesops.models.storage.StorageConfig.get_by', 'StorageConfig.get_by', ([], {'db': 'db', 'field': '"""key"""', 'value': 'storage_destination_key'}), "(db=db, field='key', value=storage_destination_key)\n", (5861, 5912), False, 'from fidesops.models.storage import StorageConfig\n'), ((6799, 7068), 'fidesops.models.policy.Rule.create_or_update', 'Rule.create_or_update', ([], {'db': 'db', 'data': "{'action_type': schema.action_type, 'client_id': client.id, 'key': schema.\n key, 'name': schema.name, 'policy_id': policy.id,\n 'storage_destination_id': associated_storage_config_id,\n 'masking_strategy': masking_strategy_data}"}), "(db=db, data={'action_type': schema.action_type,\n 'client_id': client.id, 'key': schema.key, 'name': schema.name,\n 'policy_id': policy.id, 'storage_destination_id':\n associated_storage_config_id, 'masking_strategy': masking_strategy_data})\n", (6820, 7068), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((8945, 9032), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (8956, 9032), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((8592, 8650), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_DELETE]'}), '(verify_oauth_client, scopes=[scopes.RULE_DELETE])\n', (8600, 8650), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((10038, 10125), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (10049, 10125), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((10544, 10717), 'fidesops.models.policy.RuleTarget.create_or_update', 'RuleTarget.create_or_update', ([], {'db': 'db', 'data': "{'name': schema.name, 'key': schema.key, 'data_category': schema.\n data_category, 'rule_id': rule.id, 'client_id': client.id}"}), "(db=db, data={'name': schema.name, 'key': schema\n .key, 'data_category': schema.data_category, 'rule_id': rule.id,\n 'client_id': client.id})\n", (10571, 10717), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((12801, 12888), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (12812, 12888), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((13172, 13281), 'fidesops.models.policy.RuleTarget.filter', 'RuleTarget.filter', ([], {'db': 'db', 'conditions': '(RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id)'}), '(db=db, conditions=RuleTarget.key == rule_target_key and \n RuleTarget.rule_id == rule.id)\n', (13189, 13281), False, 'from fidesops.models.policy import ActionType, Policy, 
Rule, RuleTarget\n'), ((12406, 12464), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_DELETE]'}), '(verify_oauth_client, scopes=[scopes.RULE_DELETE])\n', (12414, 12464), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4001, 4028), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (4017, 4028), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((4368, 4395), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (4384, 4395), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((6477, 6504), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (6493, 6504), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((7587, 7614), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (7603, 7614), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((7958, 7985), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (7974, 7985), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((8320, 8347), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (8336, 8347), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((11200, 11227), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (11216, 11227), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((11676, 11703), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (11692, 11703), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((12117, 12144), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (12133, 12144), False, 'from fidesops.schemas.api import BulkUpdateFailed\n')] |
oliveriopt/mood-analytics | engage-analytics/sentiment_analysis/src/report/interface_report.py | c98eb8c483a05af938a2f6f49d8ea803f5711572 | import emoji
import sentiment_analysis.src.report.cons_report as cons
import sentiment_analysis.src.constants as global_cons
from utils.data_connection.api_data_manager import APISourcesFetcher
from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question
from sentiment_analysis.src.word_cloud import words_clouds
from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment
from nested_lookup import nested_lookup
class InterFaceReport:
def __init__(self, topics: dict, surveys: dict, company_id: str, weeks: list,
g_client: ClientsLanguageSentiment,
api_source_manager: APISourcesFetcher):
self.topics = topics
self.surveys = surveys
self.company_id = company_id
self.weeks = weeks
self.g_client = g_client
self.api_source_manager = api_source_manager
self.thresholds = ()
self.table_surveys_replies = []
self.table_topics = []
self.table_topic_comment = []
self.counter_text_sr = None
self.counter_text_topics = None
self.info_file = read_json_file("en_US.json")
self.image_base64_sr = None
self.image_base64_topics = None
def sort_by_dimension_sentiment_table(self) -> None:
"""
Sort by dimension and by sentiment
:return:
"""
temp_table = []
for dimension in cons.dimensions:
temp = [d for d in self.table_surveys_replies if d['dimension'] == dimension]
temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True)
temp_table.extend(temp)
self.table_surveys_replies = temp_table
def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None:
"""
Create array with the dictionary for interface
:param features: list of features to extract
:param company_week: company week of the company
:return:
"""
for item_analyze in features:
question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week)
dimension = extract_dimension(self.info_file, dimension=item_analyze[0])
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(dimension=dimension)
temp.update(question=question)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_surveys_replies.append(temp)
self.sort_by_dimension_sentiment_table()
def insert_to_list_topics(self, features: list) -> None:
"""
Create array with the dictionary for interface - referenced to topic headlines
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topics.append(temp)
self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True)
def insert_to_list_topic_comments(self, features: list) -> None:
"""
Create array with the dictionary for interface - referenced to topic comments
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id_comment_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id_comment_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topic_comment.append(temp)
self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True)
def word_cloud(self):
"""
Create wordcloud of the main words
:return:
"""
self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc)
self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc)
@staticmethod
def __count_filter_keys(entities: list) -> object:
"""
Count and filter keys
:param entities: list of entities text
:return:
"""
entities = ClientsLanguageSentiment.count_entities(entities=entities)
entities = ClientsLanguageSentiment.filter_black_list(entities=entities)
return entities
def __process_sr(self) -> None:
"""
Process the surveys replies
:return:
"""
for company_id, periods in self.surveys.items():
for period in self.weeks:
period_parts = period.split(CUSTOM_YEAR_WEEK_AGG)
translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0],
year=period_parts[1],
company_id=self.company_id)
sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods)
sr_content = nested_lookup(global_cons.SR_CONTENT, periods)
sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods)
sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods)
sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment))
self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week)
self.counter_text_sr = self.__count_filter_keys(entities=sr_entities)
def __process_topics(self) -> None:
"""
Process the topics
:return:
"""
for company_id, topics in self.topics.items():
# heading
topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics)
topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics)
topic_ids = list(topics.keys())
topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments))
self.insert_to_list_topics(topic_w_sentiments)
# comments
for topic_id, topic in topics.items():
topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic)
topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic)
topic_list_ids = [topic_id] * len(topic_comments)
topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores))
self.insert_to_list_topic_comments(topic_w_scores)
entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics)
self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities)
def process_interface(self) -> None:
"""
Take the info needed to write into report_pdf
:return:
"""
self.__process_sr()
self.__process_topics()
| [((1160, 1188), 'utils.utilities.read_json_file', 'read_json_file', (['"""en_US.json"""'], {}), "('en_US.json')\n", (1174, 1188), False, 'from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question\n'), ((4289, 4346), 'sentiment_analysis.src.word_cloud.words_clouds', 'words_clouds', (['self.counter_text_sr', 'cons.path_image_sr_wc'], {}), '(self.counter_text_sr, cons.path_image_sr_wc)\n', (4301, 4346), False, 'from sentiment_analysis.src.word_cloud import words_clouds\n'), ((4382, 4447), 'sentiment_analysis.src.word_cloud.words_clouds', 'words_clouds', (['self.counter_text_topics', 'cons.path_image_topics_wc'], {}), '(self.counter_text_topics, cons.path_image_topics_wc)\n', (4394, 4447), False, 'from sentiment_analysis.src.word_cloud import words_clouds\n'), ((4659, 4717), 'sentiment_analysis.src.clients_language_sentiments_entity.ClientsLanguageSentiment.count_entities', 'ClientsLanguageSentiment.count_entities', ([], {'entities': 'entities'}), '(entities=entities)\n', (4698, 4717), False, 'from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment\n'), ((4737, 4798), 'sentiment_analysis.src.clients_language_sentiments_entity.ClientsLanguageSentiment.filter_black_list', 'ClientsLanguageSentiment.filter_black_list', ([], {'entities': 'entities'}), '(entities=entities)\n', (4779, 4798), False, 'from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment\n'), ((2082, 2160), 'utils.utilities.extract_question', 'extract_question', (['self.info_file'], {'dimension': 'item_analyze[0]', 'week': 'company_week'}), '(self.info_file, dimension=item_analyze[0], week=company_week)\n', (2098, 2160), False, 'from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question\n'), ((2185, 2245), 'utils.utilities.extract_dimension', 'extract_dimension', (['self.info_file'], {'dimension': 'item_analyze[0]'}), '(self.info_file, dimension=item_analyze[0])\n', (2202, 2245), False, 'from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question\n'), ((6240, 6288), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.TOPIC_CONTENT', 'topics'], {}), '(global_cons.TOPIC_CONTENT, topics)\n', (6253, 6288), False, 'from nested_lookup import nested_lookup\n'), ((6329, 6379), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.TOPIC_SENTIMENT', 'topics'], {}), '(global_cons.TOPIC_SENTIMENT, topics)\n', (6342, 6379), False, 'from nested_lookup import nested_lookup\n'), ((7089, 7138), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.TOPIC_ENTITIES', 'topics'], {}), '(global_cons.TOPIC_ENTITIES, topics)\n', (7102, 7138), False, 'from nested_lookup import nested_lookup\n'), ((7178, 7227), 'sentiment_analysis.src.clients_language_sentiments_entity.ClientsLanguageSentiment.count_entities', 'ClientsLanguageSentiment.count_entities', (['entities'], {}), '(entities)\n', (7217, 7227), False, 'from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment\n'), ((5470, 5518), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.SR_DIMENSION', 'periods'], {}), '(global_cons.SR_DIMENSION, periods)\n', (5483, 5518), False, 'from nested_lookup import nested_lookup\n'), ((5548, 5594), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.SR_CONTENT', 'periods'], {}), '(global_cons.SR_CONTENT, periods)\n', (5561, 5594), False, 'from nested_lookup import 
nested_lookup\n'), ((5626, 5671), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.SENTIMENT', 'periods'], {}), '(global_cons.SENTIMENT, periods)\n', (5639, 5671), False, 'from nested_lookup import nested_lookup\n'), ((5702, 5749), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.SR_ENTITIES', 'periods'], {}), '(global_cons.SR_ENTITIES, periods)\n', (5715, 5749), False, 'from nested_lookup import nested_lookup\n'), ((6688, 6735), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.TOPIC_COMMENT', 'topic'], {}), '(global_cons.TOPIC_COMMENT, topic)\n', (6701, 6735), False, 'from nested_lookup import nested_lookup\n'), ((6776, 6833), 'nested_lookup.nested_lookup', 'nested_lookup', (['global_cons.TOPIC_COMMENT_SENTIMENT', 'topic'], {}), '(global_cons.TOPIC_COMMENT_SENTIMENT, topic)\n', (6789, 6833), False, 'from nested_lookup import nested_lookup\n'), ((2467, 2507), 'emoji.emojize', 'emoji.emojize', (['comment'], {'use_aliases': '(True)'}), '(comment, use_aliases=True)\n', (2480, 2507), False, 'import emoji\n'), ((3147, 3187), 'emoji.emojize', 'emoji.emojize', (['comment'], {'use_aliases': '(True)'}), '(comment, use_aliases=True)\n', (3160, 3187), False, 'import emoji\n'), ((3896, 3936), 'emoji.emojize', 'emoji.emojize', (['comment'], {'use_aliases': '(True)'}), '(comment, use_aliases=True)\n', (3909, 3936), False, 'import emoji\n')] |
dnguyenngoc/analytic | dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py | d609a93e96e7c546ad3ee3ebd4e13309ddf575f8 | resource = 'human and machine'
class DimProcess:
    def __init__(
        self,
        *,
        process_key: int,
        module: str,
        type: str,
        step: str,
        sub_step: str,
        resource: str = 'human',
    ):
        self.process_key = process_key
        self.module = module
        self.type = type
        self.step = step
        self.sub_step = sub_step
        self.resource = resource
    def steps(self):
        # Renamed from ``step`` so it does not clash with the ``step`` attribute set in __init__.
        return ['qc', 'auto_qc', 'apr_qc', 'keyer_input']
    def example_data(self):
        # Two sample dimension rows: a human QC step and a machine transform step.
        return [
            {
                'process_key': 1,
                'resource': 'human',
                'module': 'keyed_data',
                'step': 'qc',
                'sub_step': None,
            },
            {
                'process_key': 2,
                'resource': 'machine',
                'module': 'keyed_data',
                'step': 'transform',
                'sub_step': None,
            },
        ]
class FactDataExtractionModel:
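    # One fact row: a single extracted field value for a document plus the user/process and time of its last modification.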
def __init__(
self,
        *,
project_id: str,
document_id: str,
doc_set_id: str,
last_modified_time_key: int,
last_modified_date_key: int,
user_name: str = None,
process_key: int,
field_name: str,
field_value: str = None,
last_modified_timestamp: str
):
self.project_id = project_id
self.document_id = document_id
self.doc_set_id = doc_set_id
self.last_modified_time_key = last_modified_time_key
self.last_modified_date_key = last_modified_date_key
self.user_name = user_name
self.process_key = process_key
self.field_name = field_name
self.field_value = field_value
self.last_modified_timestamp = last_modified_timestamp | [] |
NewRGB/lino | lino/modlib/gfks/mixins.py | 43799e42107169ff173d3b8bc0324d5773471499 | # -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from builtins import object
from django.contrib.contenttypes.models import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.text import format_lazy
from lino.api import dd
from lino.core.gfks import gfk2lookup
from .fields import GenericForeignKey, GenericForeignKeyIdField
class Controllable(dd.Model):
# Translators: will also be concatenated with '(type)' '(object)'
owner_label = _('Controlled by')
controller_is_optional = True
class Meta(object):
abstract = True
owner_type = dd.ForeignKey(
ContentType,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(type)')))
owner_id = GenericForeignKeyIdField(
owner_type,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(object)')))
owner = GenericForeignKey(
'owner_type', 'owner_id',
verbose_name=owner_label)
@classmethod
def update_controller_field(cls, verbose_name=None, **kwargs):
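        # Adjust the verbose_name and other field options of the generic owner fields ('owner', 'owner_id', 'owner_type').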
if verbose_name is not None:
dd.update_field(cls, 'owner', verbose_name=verbose_name)
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(object)')))
dd.update_field(cls, 'owner_id', **kwargs)
if verbose_name is not None:
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(type)')))
dd.update_field(cls, 'owner_type', **kwargs)
def update_owned_instance(self, controllable):
if self.owner:
self.owner.update_owned_instance(controllable)
super(Controllable, self).update_owned_instance(controllable)
def save(self, *args, **kw):
if settings.SITE.loading_from_dump:
super(Controllable, self).save(*args, **kw)
else:
if self.owner:
self.owner.update_owned_instance(self)
super(Controllable, self).save(*args, **kw)
if self.owner:
self.owner.after_update_owned_instance(self)
def controlled_rows(self, model, **kwargs):
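        # Return the rows of `model` whose generic foreign key ('owner') points to this object.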
gfk = self._meta.get_field('owner')
kwargs = gfk2lookup(gfk, self, **kwargs)
return model.objects.filter(**kwargs)
| [((568, 586), 'django.utils.translation.ugettext_lazy', '_', (['"""Controlled by"""'], {}), "('Controlled by')\n", (569, 586), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1465, 1507), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner_id"""'], {}), "(cls, 'owner_id', **kwargs)\n", (1480, 1507), False, 'from lino.api import dd\n'), ((1679, 1723), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner_type"""'], {}), "(cls, 'owner_type', **kwargs)\n", (1694, 1723), False, 'from lino.api import dd\n'), ((2412, 2443), 'lino.core.gfks.gfk2lookup', 'gfk2lookup', (['gfk', 'self'], {}), '(gfk, self, **kwargs)\n', (2422, 2443), False, 'from lino.core.gfks import gfk2lookup\n'), ((1272, 1328), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner"""'], {'verbose_name': 'verbose_name'}), "(cls, 'owner', verbose_name=verbose_name)\n", (1287, 1328), False, 'from lino.api import dd\n'), ((835, 846), 'django.utils.translation.ugettext_lazy', '_', (['"""(type)"""'], {}), "('(type)')\n", (836, 846), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1021, 1034), 'django.utils.translation.ugettext_lazy', '_', (['"""(object)"""'], {}), "('(object)')\n", (1022, 1034), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1441, 1454), 'django.utils.translation.ugettext_lazy', '_', (['"""(object)"""'], {}), "('(object)')\n", (1442, 1454), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1657, 1668), 'django.utils.translation.ugettext_lazy', '_', (['"""(type)"""'], {}), "('(type)')\n", (1658, 1668), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
1enes/optical_form_reader | optical_form_reader/main.py | fab99f2403c25f84fcb5bdac50148ab248432516 | import cv2
import numpy as np
import matplotlib.pyplot as plt
from imutils import contours
from imutils.perspective import four_point_transform, order_points
import imutils
cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #,
alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'}
def cevap_islemleri(isim,coords):
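    # For each group of answer bubbles, count the filled (white) pixels in every bubble and keep the one with the highest count.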
a=0
thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),20)):
cevap=None
cnt=contours.sort_contours(coords[i:i+30])[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
a+=1
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
def cevap_contour_bul(isim,isim_gri):
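    # Find answer-bubble contours on the adaptively thresholded sheet, keeping roughly square blobs and skipping near-duplicate centres.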
coord=[]
thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
contour=imutils.grab_contours(contour)
contour=contours.sort_contours(contour,method="top-to-bottom")[0]
for c in contour:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1500 and area>250 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord
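# Detect an upside-down sheet from the detected region areas and rotate it
# 180 degrees if needed; returns (was_rotated, sheet).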
def ters_bul(kagit,areas):
ret=False
#print(areas[0][0])
if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000:
kagit=imutils.rotate(kagit,angle=180)
print("Kağıdı ters koymuşsunuz,çevrildi")
ret=True
return ret,kagit
else:
return ret,kagit
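# Locate the sheet in the photo: threshold, take the largest 4-point contour
# and warp both the colour and grayscale images to a top-down view.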
def kagit_bul(image,gray):
thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1]
contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contour=imutils.grab_contours(contour)
contour=sorted(contour,key=cv2.contourArea,reverse=True)
for c in contour:
approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True)
if len(approx)==4:
#cv2.drawContours(image,[approx],0,(0,255,0),thickness=3)
break
warp=four_point_transform(image,approx.reshape(4,2))
warp_gri=four_point_transform(gray,approx.reshape(4,2))
return warp,warp_gri
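# Find the five bubbles of the question-group box (Otsu threshold + Canny +
# dilation, then filter for small, roughly square contours).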
def soru_grup_contour_bul(resim,gri):
thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
can=cv2.Canny(thr2,50,100)
can=cv2.dilate(can,None,iterations=3)
coords=[]
cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
if cv2.contourArea(box)>150:
coords.append(approx)
cv2.drawContours(resim,[box],0,(0,0,255),thickness=3)
if len(coords)==5:
return coords
else:
return 0
def tekrar_bul(array,koordinat):
for c in array:
if koordinat==c[0] or abs(koordinat-c[0])<15:
return True #Tekrar var
else:
pass
return False
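# Generic bubble-contour finder used for the name / student-number / exam-type
# boxes: adaptive threshold, keep square contours in the bubble size range,
# de-duplicate by centroid; returns (contours, thresholded image).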
def contour_bul(isim,isim_gri,karmasiklik=0):
coord=[]
thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
#thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
ar_value=200
#if karmasiklik==1:
# ar_value=800
cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
x_coords=[(0,0)]
sayac=0
cont=imutils.grab_contours(cont)
cont=contours.sort_contours(cont,method="top-to-bottom")[0]
for c in cont:
approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True)
area=cv2.contourArea(approx)
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if area<1300 and area>300 and ar>=0.9 and ar<=1.1:
box=cv2.minAreaRect(approx)
box=cv2.boxPoints(box)
box=np.array(box,dtype=np.int)
M=cv2.moments(box)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
# print(x,y)
res=tekrar_bul(x_coords,x)
if res is False and abs(x_coords[-1][1]-y)<35:
coord.append(approx)
x_coords.append((x,y))
sayac+=1
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
elif abs(x_coords[-1][1]-y)>=35:
coord.append(approx)
x_coords=[(0,0)]
sayac+=1
x_coords.append((x,y))
#cv2.drawContours(isim,[box],0,(255,0,0),thickness=3)
#cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2)
else:
continue
return coord,thr6
def contour_cizdir(resim,cont,isim="default"):
for c in cont:
cv2.drawContours(resim,[c],0,(0,255,0),thickness=4)
#print(f"Bulunan contour sayısı: {len(cont)}")
def bolge_bul(resim,gri):
bolgeler={}
thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8)
areas=[]
cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cont=imutils.grab_contours(cont)
temp=[]
cont=contours.sort_contours(cont,"top-to-bottom")[0]
a=0
for c in cont:
approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True)
if cv2.contourArea(approx)>10050 and len(approx)==4:
a+=1
M=cv2.moments(approx)
x=int(M['m10']/M['m00'])
y=int(M['m01']/M['m00'])
#areas.append([a,cv2.contourArea(approx)])
#cv2.putText(resim,"{}".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3)
temp.append(approx.reshape(4,2))
areas.append([a,cv2.contourArea(approx)])
#cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3)
#cv2.imshow("resim_olge",imutils.resize(resim,height=650))
    if len(temp)>=6:
bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]}
areas=sorted(areas,key=lambda x:x[1],reverse=True)
return bolgeler,areas
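# Grade one answer column: every row of 5 bubbles is one question; the bubble
# with the most filled pixels is the marked answer and is compared against
# cevap_anahtar. Returns (correct, wrong, blank, double-marked) counts.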
def cevap_islemleri(cevap,coords,col_no=1):
iki_cevap=0
bos=0
dogru=0
q_no=0
yanlıs=0
if col_no==1:
pass
elif col_no==2:
q_no=30
elif col_no==3:
q_no=60
elif col_no==4:
q_no=90
yanit=[]
#cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY)
thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),5)):
cevap=None
cnt=contours.sort_contours(coords[i:i+5])[0]
toplam_beyaz=None
say=0
for (j,c) in enumerate(cnt):
if len(cevap_anahtar)<=q_no+s:
return (dogru,yanlıs,bos,iki_cevap)
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,q_no+s)
if toplam_beyaz>800:
say+=1
        if say>1: # more than one bubble marked for this question
iki_cevap+=1
continue
        elif cevap[0]<800: # question left blank
bos+=1
continue
else:
if cevap_anahtar[q_no+s]== cevap[1]:
#print(cevap_anahtar[q_no+s],cevap[1])
dogru+=1
else:
yanlıs+=1
'''
NUMBER OF TRUE,FALSE,NOT MARKED AND MARKED MORE THAN 1
'''
return(dogru,yanlıs,bos,iki_cevap)
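# Read the name grid column by column: each column of 32 bubbles encodes one
# letter (see `alfabe`); an unfilled column is treated as a space.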
def isim_islemleri(isim,coords,thresh):
a=0
yanit=[]
ad_str=""
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),32)):
cevap=None
cnt=contours.sort_contours(coords[i:i+32],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
#plt.imshow(maske,cmap='gray')
#plt.show()
#a+=1
toplam_beyaz=cv2.countNonZero(maske)
#print(toplam_beyaz,j)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
# print("cevap",cevap)
if cevap[0]>500:
yanit.append(alfabe[cevap[1]])
elif cevap[0]<600:
yanit.append(" ")
for s in yanit:
ad_str+=s
return ad_str
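# Split the answers region into its four question columns at fixed pixel
# coordinates and perspective-warp each one.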
def cevap_kolon(cevap):
pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)])
pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)])
pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)])
pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)])
col1=four_point_transform(cevap,pts1)
col2=four_point_transform(cevap,pts2)
col3=four_point_transform(cevap,pts3)
col4=four_point_transform(cevap,pts4)
return col1,col2,col3,col4
def cevap_gri(col1,col2,col3,col4):
'''
    Convert the four answer columns to grayscale (kept separate so main_starter stays short).
'''
col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY)
col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY)
col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY)
col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY)
return col1_gri,col2_gri,col3_gri,col4_gri
def cevap_contour(col1,col2,col3,col4):
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
    col1_coord=cevap_contour_bul(col1,col1_gri)
    col2_coord=cevap_contour_bul(col2,col2_gri)
    col3_coord=cevap_contour_bul(col3,col3_gri)
    col4_coord=cevap_contour_bul(col4,col4_gri)
return col1_coord,col2_coord,col3_coord,col4_coord
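# Read the student number: each column of 10 bubbles encodes one digit.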
def ogrno_islemleri(ogrno,ogrno_gri,coords):
yanit=""
thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="left-to-right")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="top-to-bottom")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if cevap[0]>500:
yanit+=str(cevap[1])
print("Okul Numarası:",yanit)
def sinav_islemleri(sinav,sinav_gri,coords):
yanit=["QUİZ","ARA","FİNAL","BÜTÜNLEME"]
thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
return yanit[cevap[1]]
def sorugrup_islemleri(soru,soru_gri,coords):
yanit=["A","B","C","D","E"]
sayac=0
thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1]
coords=contours.sort_contours(coords,method="top-to-bottom")[0]
for (s,i) in enumerate(np.arange(0,len(coords),10)):
cevap=None
cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0]
toplam_beyaz=None
for (j,c) in enumerate(cnt):
maske=np.zeros(thresh.shape,dtype=np.uint8)
cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1)
maske=cv2.bitwise_and(thresh,thresh,mask=maske)
plt.imshow(maske,cmap='gray')
#plt.show()
sayac+=1
toplam_beyaz=cv2.countNonZero(maske)
if cevap is None or toplam_beyaz>cevap[0]:
cevap=(toplam_beyaz,j,s)
if sayac==5:
break
print(cevap)
if cevap[0]>500:
return yanit[cevap[1]]
#print("tespit edilemedi")
return "Tespit edilemedi"
####################################################################
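# Full pipeline: calibrate bubble positions on the empty sheet, then locate and
# read the name, student number, exam type, question group, approval and answer
# regions on the filled sheet and print the score.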
def main_starter(bos_kagit,dolu_kagit):
image=cv2.imread(bos_kagit)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
kagit,kagit_gri=kagit_bul(image,gray)
bolgeler,areas=bolge_bul(kagit,kagit_gri)
'''
FIND SCHOOL NUMBER PART
'''
ogrno_bos=four_point_transform(kagit,bolgeler['ogrno'])
ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno'])
ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri)
contour_cizdir(ogrno_bos_gri,ogrno_coord,"ogrenci numarası")
#v2.imshow("ogrno",imutils.resize(ogrno_bos,height=400))
'''
DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE
'''
cevap_bos=four_point_transform(kagit,bolgeler['cevaplar'])
cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar'])
col1,col2,col3,col4=cevap_kolon(cevap_bos)
col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4)
col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4)
#contour_cizdir(col1,col1_coord)
#cevap_islemleri(col2_gri,coord_cevap)
'''
EXAM TYPE FIND PART
'''
sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu'])
sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu'])
sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri)
sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord)
#cv2.imshow("sınav türü",sinav_bos_gri)
'''
OTHER PARTS THAT ON PAPER
'''
sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu'])
sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu'])
sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri)
soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1)
###############################
ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay'])
ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay'])
ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1)
'''
NAME FIND PART.
'''
isim_bos=four_point_transform(kagit,bolgeler['isim'])
isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY)
coord_isim, thres=contour_bul(isim_bos, isim_bos_gri)
#contour_cizdir(isim_bos,coord,"isim_bos")
#cevap_islemleri(cevap_bos_gri,coord)
##############################################
resim=cv2.imread(dolu_kagit)
resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY)
warp2,warp2_gri=kagit_bul(resim,resim_gri)
bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
ret,warp2=ters_bul(warp2,areas2)
'''
    If the sheet was upside down, find the regions again on the rotated image.
'''
if ret==True:
warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY)
bolgeler2,areas2=bolge_bul(warp2,warp2_gri)
else:
pass
isim_dolu=four_point_transform(warp2,bolgeler2['isim'])
isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY)
contour_cizdir(isim_dolu,coord_isim,"dolu_kagit_contourlu")
'''
    Instructor-approval region on the filled sheet.
'''
ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay'])
ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY)
ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont)
print("Öğretim Onayı:",ogret_onay)
#cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3)
#cv2.imshow("ogretc",ogretim_dolu)
#ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord)
sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu'])
sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY)
soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont)
print("Soru Grubu",soru_tur)
thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1]
isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu)
print(isim_str)
sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu'])
sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY)
sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord)
print("Sınav Türü: ",sinav_turu)
ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno'])
ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY)
ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord)
cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar'])
cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY)
col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu)
col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu)
#contour_cizdir(col1_dolu,col1_coord,"colon1 dolu")
if len(cevap_anahtar)<=30:
basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1)
elif len(cevap_anahtar)<=60:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3])
#print(basarim)
elif len(cevap_anahtar)<=90:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
        # element-wise sum of (correct, wrong, blank, double-marked) counts
        basarim=tuple(map(sum, zip(basarim1,basarim2,basarim3)))
elif len(cevap_anahtar)<=120:
basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1)
basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2)
basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3)
basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4)
        # element-wise sum of (correct, wrong, blank, double-marked) counts
        basarim=tuple(map(sum, zip(basarim1,basarim2,basarim3,basarim4)))
print(f"Doğru cevap sayısı:{basarim[0]}\nYanlış cevap sayısı:{basarim[1]}\nBoş sayısı:{basarim[2]}\nİki cevap işaret:{basarim[3]}")
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
bos_kagit="optic_empty.jpg"
dolu_kagit="optic_marked.jpg"
main_starter(bos_kagit,dolu_kagit)
| [((1561, 1659), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['isim_gri', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY_INV', '(9)', '(8)'], {}), '(isim_gri, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY_INV, 9, 8)\n', (1582, 1659), False, 'import cv2\n'), ((1673, 1740), 'cv2.findContours', 'cv2.findContours', (['thresholded', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresholded, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n', (1689, 1740), False, 'import cv2\n'), ((1795, 1825), 'imutils.grab_contours', 'imutils.grab_contours', (['contour'], {}), '(contour)\n', (1816, 1825), False, 'import imutils\n'), ((3581, 3644), 'cv2.findContours', 'cv2.findContours', (['thr', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(thr, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (3597, 3644), False, 'import cv2\n'), ((3656, 3686), 'imutils.grab_contours', 'imutils.grab_contours', (['contour'], {}), '(contour)\n', (3677, 3686), False, 'import imutils\n'), ((4237, 4261), 'cv2.Canny', 'cv2.Canny', (['thr2', '(50)', '(100)'], {}), '(thr2, 50, 100)\n', (4246, 4261), False, 'import cv2\n'), ((4269, 4304), 'cv2.dilate', 'cv2.dilate', (['can', 'None'], {'iterations': '(3)'}), '(can, None, iterations=3)\n', (4279, 4304), False, 'import cv2\n'), ((4332, 4395), 'cv2.findContours', 'cv2.findContours', (['can', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(can, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (4348, 4395), False, 'import cv2\n'), ((4404, 4431), 'imutils.grab_contours', 'imutils.grab_contours', (['cont'], {}), '(cont)\n', (4425, 4431), False, 'import imutils\n'), ((5317, 5415), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['isim_gri', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY_INV', '(9)', '(8)'], {}), '(isim_gri, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY_INV, 9, 8)\n', (5338, 5415), False, 'import cv2\n'), ((5571, 5631), 'cv2.findContours', 'cv2.findContours', (['thr6', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_NONE'], {}), '(thr6, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n', (5587, 5631), False, 'import cv2\n'), ((5683, 5710), 'imutils.grab_contours', 'imutils.grab_contours', (['cont'], {}), '(cont)\n', (5704, 5710), False, 'import imutils\n'), ((7306, 7399), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gri', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY_INV', '(9)', '(8)'], {}), '(gri, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY_INV, 9, 8)\n', (7327, 7399), False, 'import cv2\n'), ((7414, 7478), 'cv2.findContours', 'cv2.findContours', (['thr2', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(thr2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (7430, 7478), False, 'import cv2\n'), ((7487, 7514), 'imutils.grab_contours', 'imutils.grab_contours', (['cont'], {}), '(cont)\n', (7508, 7514), False, 'import imutils\n'), ((11427, 11481), 'numpy.array', 'np.array', (['[(2, 50), (300, 50), (2, 1545), (300, 1545)]'], {}), '([(2, 50), (300, 50), (2, 1545), (300, 1545)])\n', (11435, 11481), True, 'import numpy as np\n'), ((11485, 11543), 'numpy.array', 'np.array', (['[(300, 50), (600, 50), (302, 1545), (602, 1545)]'], {}), '([(300, 50), (600, 50), (302, 1545), (602, 1545)])\n', (11493, 11543), True, 'import numpy as np\n'), ((11547, 11605), 'numpy.array', 'np.array', (['[(600, 50), (900, 50), (602, 1545), (902, 1545)]'], {}), '([(600, 50), (900, 50), (602, 1545), (902, 1545)])\n', (11555, 11605), True, 'import numpy as np\n'), ((11609, 11669), 'numpy.array', 'np.array', (['[(900, 50), (1200, 50), (902, 1545), 
(1202, 1545)]'], {}), '([(900, 50), (1200, 50), (902, 1545), (1202, 1545)])\n', (11617, 11669), True, 'import numpy as np\n'), ((11675, 11708), 'imutils.perspective.four_point_transform', 'four_point_transform', (['cevap', 'pts1'], {}), '(cevap, pts1)\n', (11695, 11708), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((11718, 11751), 'imutils.perspective.four_point_transform', 'four_point_transform', (['cevap', 'pts2'], {}), '(cevap, pts2)\n', (11738, 11751), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((11761, 11794), 'imutils.perspective.four_point_transform', 'four_point_transform', (['cevap', 'pts3'], {}), '(cevap, pts3)\n', (11781, 11794), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((11804, 11837), 'imutils.perspective.four_point_transform', 'four_point_transform', (['cevap', 'pts4'], {}), '(cevap, pts4)\n', (11824, 11837), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((11993, 12031), 'cv2.cvtColor', 'cv2.cvtColor', (['col1', 'cv2.COLOR_BGR2GRAY'], {}), '(col1, cv2.COLOR_BGR2GRAY)\n', (12005, 12031), False, 'import cv2\n'), ((12045, 12083), 'cv2.cvtColor', 'cv2.cvtColor', (['col2', 'cv2.COLOR_BGR2GRAY'], {}), '(col2, cv2.COLOR_BGR2GRAY)\n', (12057, 12083), False, 'import cv2\n'), ((12097, 12135), 'cv2.cvtColor', 'cv2.cvtColor', (['col3', 'cv2.COLOR_BGR2GRAY'], {}), '(col3, cv2.COLOR_BGR2GRAY)\n', (12109, 12135), False, 'import cv2\n'), ((12149, 12187), 'cv2.cvtColor', 'cv2.cvtColor', (['col4', 'cv2.COLOR_BGR2GRAY'], {}), '(col4, cv2.COLOR_BGR2GRAY)\n', (12161, 12187), False, 'import cv2\n'), ((15633, 15654), 'cv2.imread', 'cv2.imread', (['bos_kagit'], {}), '(bos_kagit)\n', (15643, 15654), False, 'import cv2\n'), ((15665, 15704), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (15677, 15704), False, 'import cv2\n'), ((15866, 15912), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['ogrno']"], {}), "(kagit, bolgeler['ogrno'])\n", (15886, 15912), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((15931, 15981), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit_gri', "bolgeler['ogrno']"], {}), "(kagit_gri, bolgeler['ogrno'])\n", (15951, 15981), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((16277, 16326), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['cevaplar']"], {}), "(kagit, bolgeler['cevaplar'])\n", (16297, 16326), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((16345, 16398), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit_gri', "bolgeler['cevaplar']"], {}), "(kagit_gri, bolgeler['cevaplar'])\n", (16365, 16398), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((16750, 16801), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['sinav_turu']"], {}), "(kagit, bolgeler['sinav_turu'])\n", (16770, 16801), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((16820, 16875), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit_gri', "bolgeler['sinav_turu']"], {}), "(kagit_gri, bolgeler['sinav_turu'])\n", (16840, 16875), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17131, 17182), 
'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['soru_grubu']"], {}), "(kagit, bolgeler['soru_grubu'])\n", (17151, 17182), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17204, 17259), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit_gri', "bolgeler['soru_grubu']"], {}), "(kagit_gri, bolgeler['soru_grubu'])\n", (17224, 17259), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17537, 17590), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['ogretim_onay']"], {}), "(kagit, bolgeler['ogretim_onay'])\n", (17557, 17590), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17611, 17668), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit_gri', "bolgeler['ogretim_onay']"], {}), "(kagit_gri, bolgeler['ogretim_onay'])\n", (17631, 17668), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17830, 17875), 'imutils.perspective.four_point_transform', 'four_point_transform', (['kagit', "bolgeler['isim']"], {}), "(kagit, bolgeler['isim'])\n", (17850, 17875), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((17893, 17935), 'cv2.cvtColor', 'cv2.cvtColor', (['isim_bos', 'cv2.COLOR_BGR2GRAY'], {}), '(isim_bos, cv2.COLOR_BGR2GRAY)\n', (17905, 17935), False, 'import cv2\n'), ((18172, 18194), 'cv2.imread', 'cv2.imread', (['dolu_kagit'], {}), '(dolu_kagit)\n', (18182, 18194), False, 'import cv2\n'), ((18210, 18249), 'cv2.cvtColor', 'cv2.cvtColor', (['resim', 'cv2.COLOR_BGR2GRAY'], {}), '(resim, cv2.COLOR_BGR2GRAY)\n', (18222, 18249), False, 'import cv2\n'), ((18625, 18671), 'imutils.perspective.four_point_transform', 'four_point_transform', (['warp2', "bolgeler2['isim']"], {}), "(warp2, bolgeler2['isim'])\n", (18645, 18671), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((18690, 18733), 'cv2.cvtColor', 'cv2.cvtColor', (['isim_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(isim_dolu, cv2.COLOR_BGR2GRAY)\n', (18702, 18733), False, 'import cv2\n'), ((18869, 18923), 'imutils.perspective.four_point_transform', 'four_point_transform', (['warp2', "bolgeler2['ogretim_onay']"], {}), "(warp2, bolgeler2['ogretim_onay'])\n", (18889, 18923), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((18945, 18991), 'cv2.cvtColor', 'cv2.cvtColor', (['ogretim_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(ogretim_dolu, cv2.COLOR_BGR2GRAY)\n', (18957, 18991), False, 'import cv2\n'), ((19338, 19390), 'imutils.perspective.four_point_transform', 'four_point_transform', (['warp2', "bolgeler2['soru_grubu']"], {}), "(warp2, bolgeler2['soru_grubu'])\n", (19358, 19390), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((19413, 19460), 'cv2.cvtColor', 'cv2.cvtColor', (['sorugrup_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(sorugrup_dolu, cv2.COLOR_BGR2GRAY)\n', (19425, 19460), False, 'import cv2\n'), ((19783, 19835), 'imutils.perspective.four_point_transform', 'four_point_transform', (['warp2', "bolgeler2['sinav_turu']"], {}), "(warp2, bolgeler2['sinav_turu'])\n", (19803, 19835), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((19855, 19899), 'cv2.cvtColor', 'cv2.cvtColor', (['sinav_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(sinav_dolu, cv2.COLOR_BGR2GRAY)\n', (19867, 19899), False, 'import cv2\n'), ((20030, 20077), 'imutils.perspective.four_point_transform', 
'four_point_transform', (['warp2', "bolgeler2['ogrno']"], {}), "(warp2, bolgeler2['ogrno'])\n", (20050, 20077), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((20097, 20141), 'cv2.cvtColor', 'cv2.cvtColor', (['ogrno_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(ogrno_dolu, cv2.COLOR_BGR2GRAY)\n', (20109, 20141), False, 'import cv2\n'), ((20221, 20271), 'imutils.perspective.four_point_transform', 'four_point_transform', (['warp2', "bolgeler2['cevaplar']"], {}), "(warp2, bolgeler2['cevaplar'])\n", (20241, 20271), False, 'from imutils.perspective import four_point_transform, order_points\n'), ((20291, 20335), 'cv2.cvtColor', 'cv2.cvtColor', (['cevap_dolu', 'cv2.COLOR_BGR2GRAY'], {}), '(cevap_dolu, cv2.COLOR_BGR2GRAY)\n', (20303, 20335), False, 'import cv2\n'), ((21732, 21745), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (21743, 21745), False, 'import cv2\n'), ((21751, 21774), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (21772, 21774), False, 'import cv2\n'), ((803, 855), 'cv2.threshold', 'cv2.threshold', (['isim', '(179)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(isim, 179, 255, cv2.THRESH_BINARY_INV)\n', (816, 855), False, 'import cv2\n'), ((868, 922), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""top-to-bottom"""'}), "(coords, method='top-to-bottom')\n", (890, 922), False, 'from imutils import contours\n'), ((1839, 1894), 'imutils.contours.sort_contours', 'contours.sort_contours', (['contour'], {'method': '"""top-to-bottom"""'}), "(contour, method='top-to-bottom')\n", (1861, 1894), False, 'from imutils import contours\n'), ((2004, 2027), 'cv2.contourArea', 'cv2.contourArea', (['approx'], {}), '(approx)\n', (2019, 2027), False, 'import cv2\n'), ((2054, 2078), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (2070, 2078), False, 'import cv2\n'), ((3308, 3340), 'imutils.rotate', 'imutils.rotate', (['kagit'], {'angle': '(180)'}), '(kagit, angle=180)\n', (3322, 3340), False, 'import imutils\n'), ((3517, 3565), 'cv2.threshold', 'cv2.threshold', (['gray', '(150)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 150, 255, cv2.THRESH_BINARY)\n', (3530, 3565), False, 'import cv2\n'), ((4160, 4227), 'cv2.threshold', 'cv2.threshold', (['gri', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(gri, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (4173, 4227), False, 'import cv2\n'), ((4536, 4559), 'cv2.contourArea', 'cv2.contourArea', (['approx'], {}), '(approx)\n', (4551, 4559), False, 'import cv2\n'), ((4586, 4610), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (4602, 4610), False, 'import cv2\n'), ((5721, 5773), 'imutils.contours.sort_contours', 'contours.sort_contours', (['cont'], {'method': '"""top-to-bottom"""'}), "(cont, method='top-to-bottom')\n", (5743, 5773), False, 'from imutils import contours\n'), ((5880, 5903), 'cv2.contourArea', 'cv2.contourArea', (['approx'], {}), '(approx)\n', (5895, 5903), False, 'import cv2\n'), ((5930, 5954), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (5946, 5954), False, 'import cv2\n'), ((7142, 7199), 'cv2.drawContours', 'cv2.drawContours', (['resim', '[c]', '(0)', '(0, 255, 0)'], {'thickness': '(4)'}), '(resim, [c], 0, (0, 255, 0), thickness=4)\n', (7158, 7199), False, 'import cv2\n'), ((7538, 7583), 'imutils.contours.sort_contours', 'contours.sort_contours', (['cont', '"""top-to-bottom"""'], {}), "(cont, 'top-to-bottom')\n", (7560, 7583), False, 'from imutils import 
contours\n'), ((8879, 8932), 'cv2.threshold', 'cv2.threshold', (['cevap', '(180)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(cevap, 180, 255, cv2.THRESH_BINARY_INV)\n', (8892, 8932), False, 'import cv2\n'), ((8947, 9001), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""top-to-bottom"""'}), "(coords, method='top-to-bottom')\n", (8969, 9001), False, 'from imutils import contours\n'), ((10421, 10475), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""left-to-right"""'}), "(coords, method='left-to-right')\n", (10443, 10475), False, 'from imutils import contours\n'), ((12678, 12735), 'cv2.threshold', 'cv2.threshold', (['ogrno_gri', '(180)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(ogrno_gri, 180, 255, cv2.THRESH_BINARY_INV)\n', (12691, 12735), False, 'import cv2\n'), ((12748, 12802), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""left-to-right"""'}), "(coords, method='left-to-right')\n", (12770, 12802), False, 'from imutils import contours\n'), ((13643, 13700), 'cv2.threshold', 'cv2.threshold', (['sinav_gri', '(180)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(sinav_gri, 180, 255, cv2.THRESH_BINARY_INV)\n', (13656, 13700), False, 'import cv2\n'), ((13713, 13767), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""top-to-bottom"""'}), "(coords, method='top-to-bottom')\n", (13735, 13767), False, 'from imutils import contours\n'), ((14544, 14600), 'cv2.threshold', 'cv2.threshold', (['soru_gri', '(180)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(soru_gri, 180, 255, cv2.THRESH_BINARY_INV)\n', (14557, 14600), False, 'import cv2\n'), ((14613, 14667), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords'], {'method': '"""top-to-bottom"""'}), "(coords, method='top-to-bottom')\n", (14635, 14667), False, 'from imutils import contours\n'), ((18487, 18526), 'cv2.cvtColor', 'cv2.cvtColor', (['warp2', 'cv2.COLOR_BGR2GRAY'], {}), '(warp2, cv2.COLOR_BGR2GRAY)\n', (18499, 18526), False, 'import cv2\n'), ((19599, 19676), 'cv2.threshold', 'cv2.threshold', (['isim_dolu_gri', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(isim_dolu_gri, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (19612, 19676), False, 'import cv2\n'), ((1016, 1056), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 30]'], {}), '(coords[i:i + 30])\n', (1038, 1056), False, 'from imutils import contours\n'), ((1144, 1182), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (1152, 1182), True, 'import numpy as np\n'), ((1197, 1259), 'cv2.drawContours', 'cv2.drawContours', (['maske', '[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (1213, 1259), False, 'import cv2\n'), ((1273, 1316), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (1288, 1316), False, 'import cv2\n'), ((1361, 1384), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (1377, 1384), False, 'import cv2\n'), ((2196, 2219), 'cv2.minAreaRect', 'cv2.minAreaRect', (['approx'], {}), '(approx)\n', (2211, 2219), False, 'import cv2\n'), ((2237, 2255), 'cv2.boxPoints', 'cv2.boxPoints', (['box'], {}), '(box)\n', (2250, 2255), False, 'import cv2\n'), ((2273, 2300), 'numpy.array', 'np.array', (['box'], {'dtype': 'np.int'}), '(box, dtype=np.int)\n', (2281, 2300), True, 'import 
numpy as np\n'), ((2315, 2331), 'cv2.moments', 'cv2.moments', (['box'], {}), '(box)\n', (2326, 2331), False, 'import cv2\n'), ((4716, 4739), 'cv2.minAreaRect', 'cv2.minAreaRect', (['approx'], {}), '(approx)\n', (4731, 4739), False, 'import cv2\n'), ((4757, 4775), 'cv2.boxPoints', 'cv2.boxPoints', (['box'], {}), '(box)\n', (4770, 4775), False, 'import cv2\n'), ((4793, 4820), 'numpy.array', 'np.array', (['box'], {'dtype': 'np.int'}), '(box, dtype=np.int)\n', (4801, 4820), True, 'import numpy as np\n'), ((6072, 6095), 'cv2.minAreaRect', 'cv2.minAreaRect', (['approx'], {}), '(approx)\n', (6087, 6095), False, 'import cv2\n'), ((6113, 6131), 'cv2.boxPoints', 'cv2.boxPoints', (['box'], {}), '(box)\n', (6126, 6131), False, 'import cv2\n'), ((6149, 6176), 'numpy.array', 'np.array', (['box'], {'dtype': 'np.int'}), '(box, dtype=np.int)\n', (6157, 6176), True, 'import numpy as np\n'), ((6191, 6207), 'cv2.moments', 'cv2.moments', (['box'], {}), '(box)\n', (6202, 6207), False, 'import cv2\n'), ((7783, 7802), 'cv2.moments', 'cv2.moments', (['approx'], {}), '(approx)\n', (7794, 7802), False, 'import cv2\n'), ((9094, 9133), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 5]'], {}), '(coords[i:i + 5])\n', (9116, 9133), False, 'from imutils import contours\n'), ((9335, 9373), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (9343, 9373), True, 'import numpy as np\n'), ((9388, 9450), 'cv2.drawContours', 'cv2.drawContours', (['maske', '[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (9404, 9450), False, 'import cv2\n'), ((9464, 9507), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (9479, 9507), False, 'import cv2\n'), ((9519, 9549), 'matplotlib.pyplot.imshow', 'plt.imshow', (['maske'], {'cmap': '"""gray"""'}), "(maske, cmap='gray')\n", (9529, 9549), True, 'import matplotlib.pyplot as plt\n'), ((9602, 9625), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (9618, 9625), False, 'import cv2\n'), ((10569, 10633), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 32]'], {'method': '"""top-to-bottom"""'}), "(coords[i:i + 32], method='top-to-bottom')\n", (10591, 10633), False, 'from imutils import contours\n'), ((10720, 10758), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (10728, 10758), True, 'import numpy as np\n'), ((10773, 10835), 'cv2.drawContours', 'cv2.drawContours', (['maske', '[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (10789, 10835), False, 'import cv2\n'), ((10849, 10892), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (10864, 10892), False, 'import cv2\n'), ((11005, 11028), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (11021, 11028), False, 'import cv2\n'), ((12896, 12960), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 10]'], {'method': '"""top-to-bottom"""'}), "(coords[i:i + 10], method='top-to-bottom')\n", (12918, 12960), False, 'from imutils import contours\n'), ((13047, 13085), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (13055, 13085), True, 'import numpy as np\n'), ((13100, 13162), 'cv2.drawContours', 'cv2.drawContours', (['maske', 
'[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (13116, 13162), False, 'import cv2\n'), ((13176, 13219), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (13191, 13219), False, 'import cv2\n'), ((13231, 13261), 'matplotlib.pyplot.imshow', 'plt.imshow', (['maske'], {'cmap': '"""gray"""'}), "(maske, cmap='gray')\n", (13241, 13261), True, 'import matplotlib.pyplot as plt\n'), ((13314, 13337), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (13330, 13337), False, 'import cv2\n'), ((13861, 13925), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 10]'], {'method': '"""left-to-right"""'}), "(coords[i:i + 10], method='left-to-right')\n", (13883, 13925), False, 'from imutils import contours\n'), ((14012, 14050), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (14020, 14050), True, 'import numpy as np\n'), ((14065, 14127), 'cv2.drawContours', 'cv2.drawContours', (['maske', '[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (14081, 14127), False, 'import cv2\n'), ((14141, 14184), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (14156, 14184), False, 'import cv2\n'), ((14196, 14226), 'matplotlib.pyplot.imshow', 'plt.imshow', (['maske'], {'cmap': '"""gray"""'}), "(maske, cmap='gray')\n", (14206, 14226), True, 'import matplotlib.pyplot as plt\n'), ((14279, 14302), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (14295, 14302), False, 'import cv2\n'), ((14761, 14825), 'imutils.contours.sort_contours', 'contours.sort_contours', (['coords[i:i + 10]'], {'method': '"""left-to-right"""'}), "(coords[i:i + 10], method='left-to-right')\n", (14783, 14825), False, 'from imutils import contours\n'), ((14912, 14950), 'numpy.zeros', 'np.zeros', (['thresh.shape'], {'dtype': 'np.uint8'}), '(thresh.shape, dtype=np.uint8)\n', (14920, 14950), True, 'import numpy as np\n'), ((14965, 15027), 'cv2.drawContours', 'cv2.drawContours', (['maske', '[c]', '(0)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(maske, [c], 0, (255, 255, 255), thickness=-1)\n', (14981, 15027), False, 'import cv2\n'), ((15041, 15084), 'cv2.bitwise_and', 'cv2.bitwise_and', (['thresh', 'thresh'], {'mask': 'maske'}), '(thresh, thresh, mask=maske)\n', (15056, 15084), False, 'import cv2\n'), ((15096, 15126), 'matplotlib.pyplot.imshow', 'plt.imshow', (['maske'], {'cmap': '"""gray"""'}), "(maske, cmap='gray')\n", (15106, 15126), True, 'import matplotlib.pyplot as plt\n'), ((15199, 15222), 'cv2.countNonZero', 'cv2.countNonZero', (['maske'], {}), '(maske)\n', (15215, 15222), False, 'import cv2\n'), ((1962, 1984), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (1975, 1984), False, 'import cv2\n'), ((3812, 3834), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (3825, 3834), False, 'import cv2\n'), ((4494, 4516), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (4507, 4516), False, 'import cv2\n'), ((4652, 4670), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (4667, 4670), False, 'import cv2\n'), ((4836, 4856), 'cv2.contourArea', 'cv2.contourArea', (['box'], {}), '(box)\n', (4851, 4856), False, 'import cv2\n'), ((4918, 4977), 'cv2.drawContours', 'cv2.drawContours', (['resim', '[box]', '(0)', '(0, 0, 
255)'], {'thickness': '(3)'}), '(resim, [box], 0, (0, 0, 255), thickness=3)\n', (4934, 4977), False, 'import cv2\n'), ((5838, 5860), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (5851, 5860), False, 'import cv2\n'), ((7656, 7678), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (7669, 7678), False, 'import cv2\n'), ((7696, 7719), 'cv2.contourArea', 'cv2.contourArea', (['approx'], {}), '(approx)\n', (7711, 7719), False, 'import cv2\n'), ((8138, 8161), 'cv2.contourArea', 'cv2.contourArea', (['approx'], {}), '(approx)\n', (8153, 8161), False, 'import cv2\n')] |
Tigge/script.filmtipset-grade | service.py | a5b438dc478d6ef40f611585e9cd196c2ff49cf6 | # Copyright (c) 2013, Gustav Tiger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import xbmc
import xbmcaddon
import xbmcgui
import filmtipset
FILMTIPSET_ACCESS_KEY = "7ndg3Q3qwW8dPzbJMrB5Rw"
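# Kodi playback monitor: when a video ends or is stopped, look the film up on
# Filmtipset by IMDb id and, if it has not been graded yet, ask the user for a
# grade via a select dialog.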
class XBMCPlayer(xbmc.Player):
def __init__(self, *args):
self.imdb = None
self.time = None
self.time_total = None
def onPlayBackStarted(self):
self.update()
def onPlayBackEnded(self):
self.onDone()
def onPlayBackStopped(self):
self.onDone()
def update(self):
info = self.getVideoInfoTag()
self.imdb = info.getIMDBNumber()
self.time = self.getTime()
self.time_total = self.getTotalTime()
def onDone(self):
print "getTime", self.time
print "getTotalTime", self.time_total
print "imdb", self.imdb
addon = xbmcaddon.Addon(id='script.filmtipset-grade')
key = addon.getSetting("key")
user = addon.getSetting("user")
grader = filmtipset.Filmtipset(FILMTIPSET_ACCESS_KEY, key, user)
movie = grader.get_movie_imdb(self.imdb)
print movie
if movie["grade"]["type"] != "seen":
dialog = xbmcgui.Dialog()
grade = dialog.select("Grade " + movie["orgname"] + " on filmtipset:",
["Skip", "1", "2", "3", "4", "5"])
            if grade > 0:  # 0 = "Skip", -1 = dialog cancelled
print dialog, grade
print grader.grade(movie["id"], grade)
player = XBMCPlayer()
while(not xbmc.abortRequested):
if player.isPlayingVideo():
player.update()
xbmc.sleep(1000)
| [] |
turkeydonkey/nzmath3 | test/testMatrix.py | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 |
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil
Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing
# sub test
try:
from test.testMatrixFiniteField import *
except ImportError:
    try:
        from nzmath.test.testMatrixFiniteField import *
    except ImportError:
        from .testMatrixFiniteField import *
## for RingMatrix
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])
## for RingSquareMatrix
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])
## for FieldMatrix
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5, \
[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])
## for FieldSquareMatrix
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, \
[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6, \
[Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\
[7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4, \
[Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4, \
[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3, \
[Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)])
## other objects
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])
class MatrixTest(unittest.TestCase):
def testInit(self):
lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])
self.assertEqual(a4, lst_lst)
lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)])
self.assertEqual(a4, lst_tuple)
lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])
self.assertEqual(a4, lst_vect)
def testGetitem(self):
self.assertEqual(2, a1[1, 2])
self.assertEqual(-2, b2[2, 2])
self.assertRaises(IndexError, a1.__getitem__, "wrong")
self.assertEqual(vector.Vector([21, 1, 0]), a4[1])
def testEqual(self):
self.assertTrue(a1 == Matrix(1, 2, [3, 2]))
self.assertTrue(isinstance(a1 == a1, bool))
def testNonZero(self):
self.assertTrue(not zeroMatrix(2, 3))
def testContains(self):
self.assertTrue(5 in a2)
def testCall(self):
call = createMatrix(1, 2, [13, 4])
self.assertEqual(call, a5(2))
def testMap(self):
pow_two = createMatrix(1, 2, [9, 4])
self.assertEqual(pow_two, a1.map(lambda n : n ** 2))
def testReduce(self):
self.assertEqual(-2, a3.reduce(min))
def testGetRow(self):
row1 = vector.Vector([3, -2])
self.assertEqual(row1, a3.getRow(2))
row2 = vector.Vector([1, 2])
self.assertEqual(row2, b1.getRow(1))
def testGetColumn(self):
col1 = vector.Vector([-12, -1, 0])
self.assertEqual(col1, a4.getColumn(2))
col2 = vector.Vector([1, 3])
self.assertEqual(col2, b1.getColumn(1))
def testTranspose(self):
trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10])
self.assertEqual(trans, a3.transpose())
def testGetBlock(self):
block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])
self.assertEqual(block, b5.getBlock(2, 1, 2, 3))
def testSubMatrix(self):
sub1 = createMatrix(2, 1, [-12, 0])
self.assertEqual(sub1, a4.subMatrix(2, 1))
sub2 = createMatrix(2, 2, [4, 5, 6, 9])
self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3]))
class SquareMatrixTest(unittest.TestCase):
def testIsUpperTriangularMatrix(self):
UT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1])
notUT = createMatrix(4, 4, \
[1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 1, 1])
assert UT.isUpperTriangularMatrix()
assert not notUT.isUpperTriangularMatrix()
def testIsLowerTriangularMatrix(self):
LT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
notLT = createMatrix(4, 4, \
[1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10])
assert LT.isLowerTriangularMatrix()
assert not notLT.isLowerTriangularMatrix()
def testIsDiagonalMatrix(self):
diag = createMatrix(2, 2, [-3, 0, 0, 5])
assert diag.isDiagonalMatrix()
def testIsScalarMatrix(self):
scaler = createMatrix(2, 2, [10, 0, 0, 10])
assert scaler.isScalarMatrix()
def testIsSymmetricMatrix(self):
symmetric = createMatrix(2, 2, [2, 3, 3, 5])
assert symmetric.isSymmetricMatrix()
class RingMatrixTest(unittest.TestCase):
def testAdd(self):
sum1 = createMatrix(1, 2, [8, -4])
self.assertEqual(sum1, a1 + a2)
sum2 = createMatrix(2, 2, [1, 1, 4, 2])
self.assertEqual(sum2, b1 + b2)
def testSub(self):
sub1 = createMatrix(1, 2, [-2, 8])
self.assertEqual(sub1, a1 - a2)
sub2 = createMatrix(2, 2, [1, 3, 2, 6])
self.assertEqual(sub2, b1 - b2)
def testMul(self):
mul1 = createMatrix(1, 2, [2, -7])
self.assertEqual(mul1, a1 * b2)
mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0])
self.assertEqual(mul2, a4 * b1)
mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])
self.assertEqual(mul3, b3 * a4)
def testScalarMul(self):
mul = createMatrix(1, 2, [15, 10])
self.assertEqual(mul, 5 * a1)
def testVectorMul(self):
mul = vector.Vector([9, 19])
self.assertEqual(mul, b1 * v1)
def testMod(self):
mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1])
self.assertEqual(mod1, a3 % 3)
def testNeg(self):
neg = createMatrix(2, 2, [0, 1, -1, 2])
self.assertEqual(neg, -b2)
def testHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
h = already.hermiteNormalForm()
self.assertEqual(h, already)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
h = lessrank.hermiteNormalForm()
self.assertEqual(h.row, lessrank.row)
self.assertEqual(h.column, lessrank.column)
zerovec = vector.Vector([0, 0])
self.assertEqual(zerovec, h.getColumn(1))
square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1])
h = square.hermiteNormalForm()
self.assertEqual(h.row, square.row)
self.assertEqual(h.column, square.column)
hermite = createMatrix(3, 3, [0, 1, 0, 0 ,0, 1, 0, 0, 1])
self.assertEqual(hermite, h)
def testExtHermiteNormalForm(self):
already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
U_1, h_1 = already.exthermiteNormalForm()
self.assertEqual(h_1, already)
self.assertEqual(already * U_1, h_1)
lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
U_2, h_2 = lessrank.exthermiteNormalForm()
self.assertEqual(h_2.row, lessrank.row)
self.assertEqual(h_2.column, lessrank.column)
self.assertEqual(lessrank * U_2, h_2)
def testKernelAsModule(self):
ker_1 = a1.kernelAsModule()
self.assertEqual(a1 * ker_1[1], vector.Vector([0]))
#zero test
ker_2 = b1.kernelAsModule()
self.assertEqual(ker_2, None)
class RingSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow1 = createMatrix(2, 2, [7, 10, 15, 22])
self.assertEqual(pow1, b1 ** 2)
pow2 = createMatrix(2, 2, [1, 0, 0, 1])
self.assertEqual(pow2, b2 ** 0)
def testIsOrthogonalMatrix(self):
orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)])
assert orthogonal.isOrthogonalMatrix()
def testIsAlternatingMatrix(self):
alternate1 = createMatrix(2, 2, [0, 2, -2, 0])
assert alternate1.isAlternatingMatrix()
alternate2 = createMatrix(2, [1, 2, -2, 0])
assert not alternate2.isAntisymmetricMatrix()
def testIsSingular(self):
assert b6.isSingular()
def testTrace(self):
self.assertEqual(15, b4.trace())
def testDeterminant(self):
self.assertEqual(-2, b1.determinant())
#sf.bug #1914349
self.assertTrue(isinstance(b3.determinant(), int))
self.assertEqual(36, b3.determinant())
def testCofactor(self):
self.assertEqual(-6, b5.cofactor(1, 2))
def testCommutator(self):
commutator = createMatrix(2, 2, [5, -1, 9, -5])
self.assertEqual(commutator, b1.commutator(b2))
def testCharacteristicMatrix(self):
charMat = createMatrix(2, 2, \
[Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)])
self.assertEqual(charMat, b1.characteristicMatrix())
def testCharacteristicPolynomial(self):
assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()
def testAdjugateMatrix(self):
adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5])
self.assertEqual(adjugate, b4.adjugateMatrix())
assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)
def testCofactorMatrix(self):
cofact = d5.cofactorMatrix()
self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])
def testSmithNormalForm(self):
self.assertEqual([12, 1, 1], b5.smithNormalForm())
self.assertRaises(ValueError, b6.smithNormalForm)
self.assertEqual([1, 1, 1], b7.smithNormalForm())
self.assertEqual([9, 3, 1], b8.smithNormalForm())
def testExtSmithNormalForm(self):
smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1])
U_1, V_1, M_1 = b5.extsmithNormalForm()
self.assertEqual(smith1, M_1)
self.assertEqual(M_1, U_1 * b5 * V_1)
smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1])
U_2, V_2, M_2 = b8.extsmithNormalForm()
self.assertEqual(smith2, M_2)
self.assertEqual(M_2, U_2 * b8 * V_2)
class FieldMatrixTest(unittest.TestCase):
def testDiv(self):
div = createMatrix(1, 2, [1, Ra(2, 3)])
self.assertEqual(div, c1 / 3)
def testKernel(self):
ker = c2.kernel()
self.assertTrue(not c2 * ker)
def testImage(self):
img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0])
self.assertEqual(img, c2.image())
def testRank(self):
self.assertEqual(3, c2.rank())
self.assertEqual(3, d3.rank())
def testInverseImage(self):
self.assertEqual(d6, d5 * d5.inverseImage(d6))
self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3))
def testSolve(self):
for i in range(1, d6.column+1):
self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])
sol1 = c1.solve(v2)
for i in range(len(sol1[1])):
self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i]))
self.assertRaises(NoInverseImage, c3.solve, v3)
def testColumnEchelonForm(self):
echelon = createMatrix(4, 5,\
[Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0, 0, 1])
self.assertEqual(echelon, c2.columnEchelonForm())
class FieldSquareMatrixTest(unittest.TestCase):
def testPow(self):
pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)])
self.assertEqual(pow3, d1 ** (-2))
def testTriangulate(self):
triangle = createMatrix(3, 3, \
[Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])
self.assertEqual(triangle, d3.triangulate())
def testDeterminant(self):
self.assertEqual(Ra(-7, 15), d7.determinant())
def testInverse(self):
cinverse = createMatrix(3, 3)
cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+\
[Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)])
self.assertEqual(cinverse, d3.inverse())
self.assertRaises(NoInverse, d2.inverse)
self.assertEqual(d3.inverse() * c3, d3.inverse(c3))
def testInverseNoChange(self):
# sf bug#1849220
M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
M1.inverse()
M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)])
self.assertEqual(M2, M1)
def testHessenbergForm(self):
pass
def testLUDecomposition(self):
L, U = d4.LUDecomposition()
assert L * U == d4
assert L.isLowerTriangularMatrix()
assert U.isUpperTriangularMatrix()
class MatrixRingTest (unittest.TestCase):
def setUp(self):
self.m2z = MatrixRing.getInstance(2, Int)
def testZero(self):
z = self.m2z.zero
self.assertEqual(0, z[1, 1])
self.assertEqual(0, z[1, 2])
self.assertEqual(0, z[2, 1])
self.assertEqual(0, z[2, 2])
def testOne(self):
o = self.m2z.one
self.assertEqual(1, o[1, 1])
self.assertEqual(0, o[1, 2])
self.assertEqual(0, o[2, 1])
self.assertEqual(1, o[2, 2])
def testUnitMatrix(self):
"""
unitMatrix() is an alias of one.
"""
self.assertEqual(self.m2z.one, self.m2z.unitMatrix())
def testRingAPI(self):
m3z = MatrixRing.getInstance(3, Int)
m2q = MatrixRing.getInstance(2, rational.theRationalField)
# issubring
self.assertFalse(self.m2z.issubring(Int))
self.assertTrue(self.m2z.issubring(self.m2z))
self.assertTrue(self.m2z.issubring(m2q))
self.assertFalse(self.m2z.issubring(m3z))
# issuperring
self.assertFalse(self.m2z.issuperring(Int))
self.assertTrue(self.m2z.issuperring(self.m2z))
self.assertFalse(self.m2z.issuperring(m2q))
self.assertFalse(self.m2z.issuperring(m3z))
# getCommonSuperring
self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int)
class SubspaceTest(unittest.TestCase):
def testSupplementBasis(self):
ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7])
supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0, 5, 7, 1])
self.assertEqual(supbase, ba.supplementBasis())
def testSumOfSubspaces(self):
unit1 = Subspace(3, 1, [1, 0, 0])
unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1])
self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2))
def testIntersectionOfSubspace(self):
unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0])
unit2 = unitMatrix(3)
unit2.toSubspace()
intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0])
self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2))
class FunctionTest(unittest.TestCase):
def testCreateMatrix(self):
Q = rational.theRationalField
mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]])
self.assertEqual(mat1.coeff_ring, Int)
mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q)
self.assertEqual(mat2.coeff_ring, Q)
mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8, 9)], Q)
self.assertTrue(mat3.row == mat3.column)
self.assertTrue(mat3.__class__, FieldSquareMatrix)
mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])])
self.assertEqual(mat4.coeff_ring, Int)
mat5 = createMatrix(5, 6, Int)
self.assertTrue(mat5 == 0)
mat6 = createMatrix(1, 4)
self.assertTrue(mat6 == 0)
mat7 = createMatrix(3, Q)
self.assertTrue(mat7.row == mat7.column)
self.assertTrue(mat7 == 0)
self.assertEqual(mat7.coeff_ring, Q)
mat8 = createMatrix(7)
self.assertTrue(mat8 == 0)
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| [((1955, 1976), 'nzmath.vector.Vector', 'vector.Vector', (['[1, 4]'], {}), '([1, 4])\n', (1968, 1976), True, 'import nzmath.vector as vector\n'), ((1982, 2000), 'nzmath.vector.Vector', 'vector.Vector', (['[8]'], {}), '([8])\n', (1995, 2000), True, 'import nzmath.vector as vector\n'), ((2006, 2030), 'nzmath.vector.Vector', 'vector.Vector', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2019, 2030), True, 'import nzmath.vector as vector\n'), ((16240, 16260), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (16258, 16260), False, 'import unittest\n'), ((16478, 16503), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (16501, 16503), False, 'import unittest\n'), ((3267, 3289), 'nzmath.vector.Vector', 'vector.Vector', (['[3, -2]'], {}), '([3, -2])\n', (3280, 3289), True, 'import nzmath.vector as vector\n'), ((3350, 3371), 'nzmath.vector.Vector', 'vector.Vector', (['[1, 2]'], {}), '([1, 2])\n', (3363, 3371), True, 'import nzmath.vector as vector\n'), ((3462, 3489), 'nzmath.vector.Vector', 'vector.Vector', (['[-12, -1, 0]'], {}), '([-12, -1, 0])\n', (3475, 3489), True, 'import nzmath.vector as vector\n'), ((3553, 3574), 'nzmath.vector.Vector', 'vector.Vector', (['[1, 3]'], {}), '([1, 3])\n', (3566, 3574), True, 'import nzmath.vector as vector\n'), ((6136, 6158), 'nzmath.vector.Vector', 'vector.Vector', (['[9, 19]'], {}), '([9, 19])\n', (6149, 6158), True, 'import nzmath.vector as vector\n'), ((6838, 6859), 'nzmath.vector.Vector', 'vector.Vector', (['[0, 0]'], {}), '([0, 0])\n', (6851, 6859), True, 'import nzmath.vector as vector\n'), ((2617, 2642), 'nzmath.vector.Vector', 'vector.Vector', (['[21, 1, 0]'], {}), '([21, 1, 0])\n', (2630, 2642), True, 'import nzmath.vector as vector\n'), ((7839, 7857), 'nzmath.vector.Vector', 'vector.Vector', (['[0]'], {}), '([0])\n', (7852, 7857), True, 'import nzmath.vector as vector\n'), ((2327, 2352), 'nzmath.vector.Vector', 'vector.Vector', (['[21, 1, 0]'], {}), '([21, 1, 0])\n', (2340, 2352), True, 'import nzmath.vector as vector\n'), ((2354, 2381), 'nzmath.vector.Vector', 'vector.Vector', (['[-12, -1, 0]'], {}), '([-12, -1, 0])\n', (2367, 2381), True, 'import nzmath.vector as vector\n'), ((15734, 15755), 'nzmath.vector.Vector', 'vector.Vector', (['[1, 4]'], {}), '([1, 4])\n', (15747, 15755), True, 'import nzmath.vector as vector\n'), ((15757, 15778), 'nzmath.vector.Vector', 'vector.Vector', (['[6, 8]'], {}), '([6, 8])\n', (15770, 15778), True, 'import nzmath.vector as vector\n'), ((16374, 16417), 'unittest.makeSuite', 'unittest.makeSuite', (['all_names[name]', '"""test"""'], {}), "(all_names[name], 'test')\n", (16392, 16417), False, 'import unittest\n')] |
li-ma/homework | python/test-nose-3.py | d75b1752a02bd028af0806683abe079c7b0a9b29 | # Module Level
def setUp():
    print('test setup')
def tearDown():
    print('test teardown')
# Function Level
def func_1_setup():
    print('test_func_1 setup')
def func_1_teardown():
    print('test_func_1_teardown')
# Target Func
def test_func_1():
    print('test_func_1 run')
assert True
test_func_1.setUp = func_1_setup
test_func_1.tearDown = func_1_teardown
| [] |
scality/utapi | lib/reindex/reporting.py | 29475f1b9aa25cf3c883262bfb6f4573f846a5b7 | import requests
import redis
import json
import ast
import sys
import time
import urllib
import re
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import argparse
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-b", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
return parser.parse_args()
def safe_print(content):
print("{0}".format(content))
class askRedis():
def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password
r = redis.Redis(host=ip, port=port, db=0, password=password)
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
res = 's3:%s:%s:storageUtilized:counter' % (resource, name)
total_size = r.get(res)
res = 's3:%s:%s:numberOfObjects:counter' % (resource, name)
files = r.get(res)
try:
return {'files': int(files), "total_size": int(total_size)}
except Exception as e:
return {'files': 0, "total_size": 0}
class S3ListBuckets():
def __init__(self, host='127.0.0.1:9000'):
self.bucketd_host = host
def run(self):
docs = []
url = "%s/default/bucket/users..bucket" % self.bucketd_host
session = requests.Session()
r = session.get(url, timeout=30)
if r.status_code == 200:
payload = json.loads(r.text)
for keys in payload['Contents']:
key = keys["key"]
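                # each key has the form "<userid>..|..<bucket name>"; capture both parts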
                r1 = re.match(r"(\w+)..\|..(\w+.*)", key)
docs.append(r1.groups())
return docs
if __name__ == '__main__':
options = get_options()
redis_conf = dict(
ip=options.sentinel_ip,
port=options.sentinel_port,
sentinel_cluster_name=options.sentinel_cluster_name,
password=options.redis_password
)
P = S3ListBuckets(options.bucketd_addr)
listbuckets = P.run()
userids = set([x for x, y in listbuckets])
executor = ThreadPoolExecutor(max_workers=1)
for userid, bucket in listbuckets:
U = askRedis(**redis_conf)
data = U.read('buckets', bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
data = U.read('buckets', 'mpuShadowBucket'+bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
executor.submit(safe_print, "")
for userid in sorted(userids):
U = askRedis(**redis_conf)
data = U.read('accounts', userid)
content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, data["files"], data["total_size"])
executor.submit(safe_print, content) | [((239, 264), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (262, 264), False, 'import argparse\n'), ((2664, 2697), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (2682, 2697), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1016, 1072), 'redis.Redis', 'redis.Redis', ([], {'host': 'ip', 'port': 'port', 'db': '(0)', 'password': 'password'}), '(host=ip, port=port, db=0, password=password)\n', (1027, 1072), False, 'import redis\n'), ((1211, 1285), 'redis.Redis', 'redis.Redis', ([], {'host': 'self._ip', 'port': 'self._port', 'db': '(0)', 'password': 'self._password'}), '(host=self._ip, port=self._port, db=0, password=self._password)\n', (1222, 1285), False, 'import redis\n'), ((1876, 1894), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1892, 1894), False, 'import requests\n'), ((1991, 2009), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (2001, 2009), False, 'import json\n'), ((2110, 2148), 're.match', 're.match', (['"""(\\\\w+)..\\\\|..(\\\\w+.*)"""', 'key'], {}), "('(\\\\w+)..\\\\|..(\\\\w+.*)', key)\n", (2118, 2148), False, 'import re\n')] |
recitalAI/skim-attention | src/skim/modeling/skim_attention/modeling_skim.py | a37a277072d1f70ea615cfd19e5b84a6effd2464 | from collections import namedtuple
import logging
from dataclasses import dataclass
from typing import Optional, Tuple
import math
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm
from torch.autograd.function import Function
from transformers.file_utils import (
ModelOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
TokenClassifierOutput,
)
from transformers.models.bert.modeling_bert import (
BertConfig,
BertEmbeddings,
BertIntermediate,
BertOutput,
BertPooler,
BertEncoder,
BertOnlyMLMHead,
)
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings
from .configuration_skim import (
SkimformerConfig,
BertWithSkimEmbedConfig,
SkimmingMaskConfig,
)
logger = logging.getLogger(__name__)
SkimformerEncoderOutput = namedtuple(
"SkimformerEncoderOutput",
["hidden_states", "all_hidden_states"],
)
class SkimformerTextEmbeddings(nn.Module):
"""Construct the text embeddings from word and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.max_position_embeddings = config.max_position_embeddings
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class Skimformer1DPositionEmbeddings(nn.Module):
"""Construct sequential position embeddings."""
def __init__(self, config):
super().__init__()
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_shape, device, position_ids=None):
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(
0, seq_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
position_embeddings = self.position_embeddings(position_ids)
position_embeddings = self.LayerNorm(position_embeddings)
position_embeddings = self.dropout(position_embeddings)
return position_embeddings
class Skimformer2DPositionEmbeddings(nn.Module):
"""Construct the layout embeddings from the bounding box coordinates."""
def __init__(self, config):
super().__init__()
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.degrade_2d_positions = config.degrade_2d_positions if hasattr(config, "degrade_2d_positions") else False
self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, bbox=None):
if self.degrade_2d_positions:
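            # "degraded" 2D positions: embed only the center point of each bounding box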
try:
x_center = (bbox[:, :, 0] + bbox[:, :, 2]) // 2
y_center = (bbox[:, :, 1] + bbox[:, :, 3]) // 2
x_center_position_embeddings = self.x_position_embeddings(x_center)
y_center_position_embeddings = self.y_position_embeddings(y_center)
except IndexError as e:
raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
embeddings = x_center_position_embeddings + y_center_position_embeddings
else:
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
embeddings = (
left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertWithSkimEmbedEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size)
self.x_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)
self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)
self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)
self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(
self,
input_ids=None,
bbox=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
# project into same dimension as text embeddings
left_position_embeddings = self.x_position_projection(left_position_embeddings)
upper_position_embeddings = self.y_position_projection(upper_position_embeddings)
right_position_embeddings = self.x_position_projection(right_position_embeddings)
lower_position_embeddings = self.y_position_projection(lower_position_embeddings)
h_position_embeddings = self.h_position_projection(h_position_embeddings)
w_position_embeddings = self.w_position_projection(w_position_embeddings)
two_dim_pos_embeddings = (
left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ two_dim_pos_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class SkimAttention(nn.Module):
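    """Compute skim attention probabilities from the layout embeddings alone (query/key only, no values)."""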
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.skim_attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_layout_size, self.all_head_size)
self.key = nn.Linear(config.hidden_layout_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_layout_states,
attention_mask=None,
):
key_layer = self.transpose_for_scores(self.key(hidden_layout_states))
query_layer = self.transpose_for_scores(self.query(hidden_layout_states))
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# return the attention probabilities only: Softmax(QK^T/sqrt(d))
return attention_probs
class SkimformerSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.value = nn.Linear(config.hidden_size, self.all_head_size)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_probs,
head_mask=None,
):
value_layer = self.transpose_for_scores(self.value(hidden_states))
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# Softmax(QK^T/sqrt(d)) . V
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class SkimformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
all_head_size = config.num_attention_heads * config.attention_head_size
self.dense = nn.Linear(all_head_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class SkimformerAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = SkimformerSelfAttention(config)
self.output = SkimformerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_probs,
head_mask=None,
):
if len(self.pruned_heads) > 0:
num_attention_heads = attention_probs.shape[1]
            indices = torch.tensor(
                [idx for idx in range(num_attention_heads) if idx not in self.pruned_heads],
                dtype=torch.long,
                device=attention_probs.device,
            )
            attention_probs = torch.index_select(attention_probs, 1, indices)
self_output = self.self(
hidden_states,
attention_probs,
head_mask,
)
attention_output = self.output(self_output, hidden_states)
return attention_output
class SkimformerLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = SkimformerAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_probs,
head_mask=None,
):
attention_output = self.attention(
hidden_states,
attention_probs,
head_mask,
)
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class SkimformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_probs,
head_mask=None,
output_hidden_states=False,
return_dict=None,
):
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
layer_output = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_probs,
layer_head_mask,
)
else:
layer_output = layer_module(
hidden_states,
attention_probs,
layer_head_mask,
)
hidden_states = layer_output
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
all_hidden_states,
]
if v is not None
)
return SkimformerEncoderOutput(
hidden_states=hidden_states,
all_hidden_states=all_hidden_states,
)
class SkimformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = SkimformerConfig
base_model_prefix = "skimformer"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class BertWithSkimEmbedPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertWithSkimEmbedConfig
base_model_prefix = "bertwithskimembed"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
class SkimmingMaskPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = SkimmingMaskConfig
base_model_prefix = "skimmingmask"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
class SkimformerModelOutput(ModelOutput):
"""
Output type of :class:`~SkimformerModel`.
Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
attentions: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
class SkimformerModel(SkimformerPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.use_1d_positions = config.use_1d_positions
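        # word/token-type embeddings and position embeddings (1D sequential or 2D layout)
        # are kept in two separate streams: positions drive the skim attention, while the
        # text embeddings feed the encoder values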
self.text_embeddings = SkimformerTextEmbeddings(config)
if self.use_1d_positions:
self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config)
else:
self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)
self.contextualize_2d_positions = config.contextualize_2d_positions
if self.contextualize_2d_positions:
self.layout_encoder = BertEncoder(
BertConfig(
hidden_size=config.hidden_layout_size,
num_hidden_layers=config.num_hidden_layers_layout_encoder,
num_attention_heads=config.num_attention_heads_layout_encoder,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
max_position_embeddings=config.max_2d_position_embeddings,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps,
gradient_checkpointing=config.gradient_checkpointing,
)
)
self.skim_attention = SkimAttention(config)
self.encoder = SkimformerEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.text_embeddings.word_embeddings
def set_input_embeddings(self, value):
self.text_embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
assert (
len(input_shape) == 2
), "`input_ids` has to be of shape `[batch_size, sequence_length]`, but got shape: {}".format(input_shape)
if bbox is not None:
bbox_shape = bbox.size()
assert (
len(bbox_shape) == 3
), "`bbox` has to be of shape `[batch_size, sequence_length, 4]`, but got shape: {}".format(bbox_shape)
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
text_embedding_output = self.text_embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
if self.use_1d_positions:
pos_embedding_output = self.one_dim_pos_embeddings(
input_shape=input_shape,
device=device,
position_ids=position_ids,
)
else:
pos_embedding_output = self.two_dim_pos_embeddings(
bbox=bbox,
)
if self.contextualize_2d_positions:
pos_embedding_output = self.layout_encoder(
hidden_states=pos_embedding_output,
)[0]
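        # the skim attention probabilities are computed once from the layout embeddings
        # and shared by every layer of the text encoder below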
skim_attention_output = self.skim_attention(
pos_embedding_output,
attention_mask=extended_attention_mask,
)
encoder_outputs = self.encoder(
text_embedding_output,
skim_attention_output,
head_mask=head_mask,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
outputs = (sequence_output, pooled_output)
if output_attentions:
outputs = outputs + (skim_attention_output, )
if output_hidden_states:
outputs = outputs + encoder_outputs[1:]
return outputs
return SkimformerModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
attentions=skim_attention_output if output_attentions else None,
hidden_states=encoder_outputs.all_hidden_states,
)
class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel):
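    """Standard BERT encoder whose input embeddings also include the 2D layout position embeddings."""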
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertWithSkimEmbedEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids,
bbox=bbox,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class SkimmingMaskModel(SkimmingMaskPreTrainedModel):
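    """BERT/LayoutLM-style encoder whose self-attention is restricted to the top-k positions
    selected by a skim attention mask computed from 2D layout embeddings."""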
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.core_model_type = config.core_model_type
self.embeddings = BertEmbeddings(config) if self.core_model_type == "bert" else LayoutLMEmbeddings(config)
self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config)
self.contextualize_2d_positions = config.contextualize_2d_positions
if config.contextualize_2d_positions:
self.layout_encoder = BertEncoder(
BertConfig(
hidden_size=config.hidden_layout_size,
num_hidden_layers=config.num_hidden_layers_layout_encoder,
num_attention_heads=config.num_attention_heads_layout_encoder,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
max_position_embeddings=config.max_2d_position_embeddings,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps,
gradient_checkpointing=config.gradient_checkpointing,
)
)
self.skim_attention = SkimAttention(config)
self.top_k = config.top_k
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
if self.core_model_type == "bert":
text_embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
else:
text_embedding_output = self.embeddings(
input_ids=input_ids,
bbox=bbox,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox)
if self.contextualize_2d_positions:
spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0]
skim_attention_output = self.skim_attention(
spatial_pos_embedding_output,
attention_mask=extended_attention_mask,
)
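        # keep, for each query, only the top-k key positions according to the skim
        # attention scores; every other position receives a large negative bias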
topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices
skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device)
skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1)
skim_attention_mask = skim_attention_mask * attention_mask[:, None, :, :]
skim_attention_mask = (1.0 - skim_attention_mask) * -10000.0
encoder_outputs = self.encoder(
text_embedding_output,
skim_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class SkimformerForMaskedLM(SkimformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.skimformer = SkimformerModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.skimformer(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class SkimformerForTokenClassification(SkimformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.skimformer = SkimformerModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.skimformer(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.bert_with_skim_embed.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.bert_with_skim_embed.embeddings.word_embeddings = value
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert_with_skim_embed(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.skimming_mask_model.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.skimming_mask_model.embeddings.word_embeddings = value
def forward(
self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.skimming_mask_model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
if attention_mask.dim() == 3:
active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0
else:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) | [((1007, 1034), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1024, 1034), False, 'import logging\n'), ((1062, 1139), 'collections.namedtuple', 'namedtuple', (['"""SkimformerEncoderOutput"""', "['hidden_states', 'all_hidden_states']"], {}), "('SkimformerEncoderOutput', ['hidden_states', 'all_hidden_states'])\n", (1072, 1139), False, 'from collections import namedtuple\n'), ((1433, 1522), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {'padding_idx': 'config.pad_token_id'}), '(config.vocab_size, config.hidden_size, padding_idx=config.\n pad_token_id)\n', (1445, 1522), False, 'from torch import nn\n'), ((1555, 1611), 'torch.nn.Embedding', 'nn.Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (1567, 1611), False, 'from torch import nn\n'), ((1638, 1697), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (1650, 1697), False, 'from torch import nn\n'), ((1721, 1759), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (1731, 1759), False, 'from torch import nn\n'), ((2838, 2909), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_position_embeddings, config.hidden_layout_size)\n', (2850, 2909), False, 'from torch import nn\n'), ((3030, 3093), 'torch.nn.LayerNorm', 'LayerNorm', (['config.hidden_layout_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_layout_size, eps=config.layer_norm_eps)\n', (3039, 3093), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((3117, 3155), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (3127, 3155), False, 'from torch import nn\n'), ((3960, 4034), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (3972, 4034), False, 'from torch import nn\n'), ((4072, 4146), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (4084, 4146), False, 'from torch import nn\n'), ((4184, 4258), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (4196, 4258), False, 'from torch import nn\n'), ((4296, 4370), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (4308, 4370), False, 'from torch import nn\n'), ((4515, 4578), 'torch.nn.LayerNorm', 'LayerNorm', (['config.hidden_layout_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_layout_size, eps=config.layer_norm_eps)\n', (4524, 4578), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((4602, 4640), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (4612, 4640), False, 'from torch import nn\n'), ((6599, 6688), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {'padding_idx': 'config.pad_token_id'}), '(config.vocab_size, config.hidden_size, padding_idx=config.\n pad_token_id)\n', (6611, 
6688), False, 'from torch import nn\n'), ((6719, 6783), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (6731, 6783), False, 'from torch import nn\n'), ((6821, 6877), 'torch.nn.Embedding', 'nn.Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (6833, 6877), False, 'from torch import nn\n'), ((6916, 6990), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (6928, 6990), False, 'from torch import nn\n'), ((7028, 7102), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (7040, 7102), False, 'from torch import nn\n'), ((7140, 7214), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (7152, 7214), False, 'from torch import nn\n'), ((7252, 7326), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_2d_position_embeddings', 'config.hidden_layout_size'], {}), '(config.max_2d_position_embeddings, config.hidden_layout_size)\n', (7264, 7326), False, 'from torch import nn\n'), ((7365, 7421), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'config.hidden_size'], {}), '(config.hidden_layout_size, config.hidden_size)\n', (7374, 7421), False, 'from torch import nn\n'), ((7459, 7515), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'config.hidden_size'], {}), '(config.hidden_layout_size, config.hidden_size)\n', (7468, 7515), False, 'from torch import nn\n'), ((7553, 7609), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'config.hidden_size'], {}), '(config.hidden_layout_size, config.hidden_size)\n', (7562, 7609), False, 'from torch import nn\n'), ((7647, 7703), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'config.hidden_size'], {}), '(config.hidden_layout_size, config.hidden_size)\n', (7656, 7703), False, 'from torch import nn\n'), ((7730, 7786), 'torch.nn.LayerNorm', 'LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (7739, 7786), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((7810, 7848), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7820, 7848), False, 'from torch import nn\n'), ((11016, 11072), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'self.all_head_size'], {}), '(config.hidden_layout_size, self.all_head_size)\n', (11025, 11072), False, 'from torch import nn\n'), ((11092, 11148), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_layout_size', 'self.all_head_size'], {}), '(config.hidden_layout_size, self.all_head_size)\n', (11101, 11148), False, 'from torch import nn\n'), ((11173, 11220), 'torch.nn.Dropout', 'nn.Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (11183, 11220), False, 'from torch import nn\n'), ((13015, 13064), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (13024, 13064), False, 'from torch import nn\n'), ((13641, 13683), 'torch.matmul', 'torch.matmul', 
(['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (13653, 13683), False, 'import torch\n'), ((14139, 14183), 'torch.nn.Linear', 'nn.Linear', (['all_head_size', 'config.hidden_size'], {}), '(all_head_size, config.hidden_size)\n', (14148, 14183), False, 'from torch import nn\n'), ((14209, 14268), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (14221, 14268), False, 'from torch import nn\n'), ((14292, 14330), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (14302, 14330), False, 'from torch import nn\n'), ((14926, 15051), 'transformers.modeling_utils.find_pruneable_heads_and_indices', 'find_pruneable_heads_and_indices', (['heads', 'self.self.num_attention_heads', 'self.self.attention_head_size', 'self.pruned_heads'], {}), '(heads, self.self.num_attention_heads, self\n .self.attention_head_size, self.pruned_heads)\n', (14958, 15051), False, 'from transformers.modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\n'), ((15126, 15168), 'transformers.modeling_utils.prune_linear_layer', 'prune_linear_layer', (['self.self.value', 'index'], {}), '(self.self.value, index)\n', (15144, 15168), False, 'from transformers.modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\n'), ((15197, 15248), 'transformers.modeling_utils.prune_linear_layer', 'prune_linear_layer', (['self.output.dense', 'index'], {'dim': '(1)'}), '(self.output.dense, index, dim=1)\n', (15215, 15248), False, 'from transformers.modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\n'), ((16423, 16447), 'transformers.models.bert.modeling_bert.BertIntermediate', 'BertIntermediate', (['config'], {}), '(config)\n', (16439, 16447), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((16470, 16488), 'transformers.models.bert.modeling_bert.BertOutput', 'BertOutput', (['config'], {}), '(config)\n', (16480, 16488), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((16756, 16877), 'transformers.modeling_utils.apply_chunking_to_forward', 'apply_chunking_to_forward', (['self.feed_forward_chunk', 'self.chunk_size_feed_forward', 'self.seq_len_dim', 'attention_output'], {}), '(self.feed_forward_chunk, self.\n chunk_size_feed_forward, self.seq_len_dim, attention_output)\n', (16781, 16877), False, 'from transformers.modeling_utils import PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\n'), ((30966, 30985), 'transformers.models.bert.modeling_bert.BertEncoder', 'BertEncoder', (['config'], {}), '(config)\n', (30977, 30985), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((34677, 34934), 'transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions', 'BaseModelOutputWithPoolingAndCrossAttentions', ([], {'last_hidden_state': 'sequence_output', 'pooler_output': 'pooled_output', 'hidden_states': 'encoder_outputs.hidden_states', 'attentions': 'encoder_outputs.attentions', 
'cross_attentions': 'encoder_outputs.cross_attentions'}), '(last_hidden_state=\n sequence_output, pooler_output=pooled_output, hidden_states=\n encoder_outputs.hidden_states, attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions)\n', (34721, 34934), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((36521, 36540), 'transformers.models.bert.modeling_bert.BertEncoder', 'BertEncoder', (['config'], {}), '(config)\n', (36532, 36540), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((40494, 40549), 'torch.zeros', 'torch.zeros', (['skim_attention_output.shape'], {'device': 'device'}), '(skim_attention_output.shape, device=device)\n', (40505, 40549), False, 'import torch\n'), ((41335, 41592), 'transformers.modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions', 'BaseModelOutputWithPoolingAndCrossAttentions', ([], {'last_hidden_state': 'sequence_output', 'pooler_output': 'pooled_output', 'hidden_states': 'encoder_outputs.hidden_states', 'attentions': 'encoder_outputs.attentions', 'cross_attentions': 'encoder_outputs.cross_attentions'}), '(last_hidden_state=\n sequence_output, pooler_output=pooled_output, hidden_states=\n encoder_outputs.hidden_states, attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions)\n', (41379, 41592), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((42008, 42031), 'transformers.models.bert.modeling_bert.BertOnlyMLMHead', 'BertOnlyMLMHead', (['config'], {}), '(config)\n', (42023, 42031), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((44082, 44216), 'transformers.modeling_outputs.MaskedLMOutput', 'MaskedLMOutput', ([], {'loss': 'masked_lm_loss', 'logits': 'prediction_scores', 'hidden_states': 'outputs.hidden_states', 'attentions': 'outputs.attentions'}), '(loss=masked_lm_loss, logits=prediction_scores, hidden_states\n =outputs.hidden_states, attentions=outputs.attentions)\n', (44096, 44216), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((44602, 44640), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (44612, 44640), False, 'from torch import nn\n'), ((44667, 44715), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (44676, 44715), False, 'from torch import nn\n'), ((46801, 46921), 'transformers.modeling_outputs.TokenClassifierOutput', 'TokenClassifierOutput', ([], {'loss': 'loss', 'logits': 'logits', 'hidden_states': 'outputs.hidden_states', 'attentions': 'outputs.attentions'}), '(loss=loss, logits=logits, hidden_states=outputs.\n hidden_states, attentions=outputs.attentions)\n', (46822, 46921), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((47338, 47376), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (47348, 47376), False, 'from torch import nn\n'), ((47403, 47451), 'torch.nn.Linear', 'nn.Linear', 
(['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (47412, 47451), False, 'from torch import nn\n'), ((49382, 49502), 'transformers.modeling_outputs.TokenClassifierOutput', 'TokenClassifierOutput', ([], {'loss': 'loss', 'logits': 'logits', 'hidden_states': 'outputs.hidden_states', 'attentions': 'outputs.attentions'}), '(loss=loss, logits=logits, hidden_states=outputs.\n hidden_states, attentions=outputs.attentions)\n', (49403, 49502), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((49902, 49940), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (49912, 49940), False, 'from torch import nn\n'), ((49967, 50015), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (49976, 50015), False, 'from torch import nn\n'), ((52098, 52218), 'transformers.modeling_outputs.TokenClassifierOutput', 'TokenClassifierOutput', ([], {'loss': 'loss', 'logits': 'logits', 'hidden_states': 'outputs.hidden_states', 'attentions': 'outputs.attentions'}), '(loss=loss, logits=logits, hidden_states=outputs.\n hidden_states, attentions=outputs.attentions)\n', (52119, 52218), False, 'from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput\n'), ((2231, 2288), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (2242, 2288), False, 'import torch\n'), ((3320, 3380), 'torch.arange', 'torch.arange', (['(0)', 'seq_length'], {'dtype': 'torch.long', 'device': 'device'}), '(0, seq_length, dtype=torch.long, device=device)\n', (3332, 3380), False, 'import torch\n'), ((8549, 8606), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (8560, 8606), False, 'import torch\n'), ((11995, 12030), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (12004, 12030), False, 'import math\n'), ((12324, 12342), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (12334, 12342), False, 'from torch import nn\n'), ((15879, 15926), 'torch.index_select', 'torch.index_select', (['attention_probs', '(1)', 'indices'], {}), '(attention_probs, 1, indices)\n', (15897, 15926), False, 'import torch\n'), ((25468, 25486), 'transformers.models.bert.modeling_bert.BertPooler', 'BertPooler', (['config'], {}), '(config)\n', (25478, 25486), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((27843, 27894), 'torch.ones', 'torch.ones', (['(batch_size, seq_length)'], {'device': 'device'}), '((batch_size, seq_length), device=device)\n', (27853, 27894), False, 'import torch\n'), ((27961, 28018), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (27972, 28018), False, 'import torch\n'), ((31008, 31026), 'transformers.models.bert.modeling_bert.BertPooler', 'BertPooler', (['config'], {}), '(config)\n', (31018, 31026), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((32857, 32895), 
'torch.ones', 'torch.ones', (['input_shape'], {'device': 'device'}), '(input_shape, device=device)\n', (32867, 32895), False, 'import torch\n'), ((32960, 33017), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (32971, 33017), False, 'import torch\n'), ((35249, 35271), 'transformers.models.bert.modeling_bert.BertEmbeddings', 'BertEmbeddings', (['config'], {}), '(config)\n', (35263, 35271), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((35311, 35337), 'transformers.models.layoutlm.modeling_layoutlm.LayoutLMEmbeddings', 'LayoutLMEmbeddings', (['config'], {}), '(config)\n', (35329, 35337), False, 'from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings\n'), ((36564, 36582), 'transformers.models.bert.modeling_bert.BertPooler', 'BertPooler', (['config'], {}), '(config)\n', (36574, 36582), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((38413, 38451), 'torch.ones', 'torch.ones', (['input_shape'], {'device': 'device'}), '(input_shape, device=device)\n', (38423, 38451), False, 'import torch\n'), ((38516, 38573), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (38527, 38573), False, 'import torch\n'), ((40406, 40455), 'torch.topk', 'torch.topk', (['skim_attention_output', 'self.top_k', '(-1)'], {}), '(skim_attention_output, self.top_k, -1)\n', (40416, 40455), False, 'import torch\n'), ((43735, 43753), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (43751, 43753), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((46078, 46096), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (46094, 46096), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((48798, 48816), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (48814, 48816), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((51359, 51377), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (51375, 51377), False, 'from torch.nn import CrossEntropyLoss, LayerNorm\n'), ((24536, 25129), 'transformers.models.bert.modeling_bert.BertConfig', 'BertConfig', ([], {'hidden_size': 'config.hidden_layout_size', 'num_hidden_layers': 'config.num_hidden_layers_layout_encoder', 'num_attention_heads': 'config.num_attention_heads_layout_encoder', 'intermediate_size': 'config.intermediate_size', 'hidden_act': 'config.hidden_act', 'hidden_dropout_prob': 'config.hidden_dropout_prob', 'attention_probs_dropout_prob': 'config.attention_probs_dropout_prob', 'max_position_embeddings': 'config.max_2d_position_embeddings', 'initializer_range': 'config.initializer_range', 'layer_norm_eps': 'config.layer_norm_eps', 'gradient_checkpointing': 'config.gradient_checkpointing'}), '(hidden_size=config.hidden_layout_size, num_hidden_layers=config.\n num_hidden_layers_layout_encoder, num_attention_heads=config.\n num_attention_heads_layout_encoder, intermediate_size=config.\n intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=\n config.hidden_dropout_prob, attention_probs_dropout_prob=config.\n attention_probs_dropout_prob, max_position_embeddings=config.\n max_2d_position_embeddings, 
initializer_range=config.initializer_range,\n layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.\n gradient_checkpointing)\n', (24546, 25129), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((35600, 36193), 'transformers.models.bert.modeling_bert.BertConfig', 'BertConfig', ([], {'hidden_size': 'config.hidden_layout_size', 'num_hidden_layers': 'config.num_hidden_layers_layout_encoder', 'num_attention_heads': 'config.num_attention_heads_layout_encoder', 'intermediate_size': 'config.intermediate_size', 'hidden_act': 'config.hidden_act', 'hidden_dropout_prob': 'config.hidden_dropout_prob', 'attention_probs_dropout_prob': 'config.attention_probs_dropout_prob', 'max_position_embeddings': 'config.max_2d_position_embeddings', 'initializer_range': 'config.initializer_range', 'layer_norm_eps': 'config.layer_norm_eps', 'gradient_checkpointing': 'config.gradient_checkpointing'}), '(hidden_size=config.hidden_layout_size, num_hidden_layers=config.\n num_hidden_layers_layout_encoder, num_attention_heads=config.\n num_attention_heads_layout_encoder, intermediate_size=config.\n intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=\n config.hidden_dropout_prob, attention_probs_dropout_prob=config.\n attention_probs_dropout_prob, max_position_embeddings=config.\n max_2d_position_embeddings, initializer_range=config.initializer_range,\n layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.\n gradient_checkpointing)\n', (35610, 36193), False, 'from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead\n'), ((7895, 7939), 'torch.arange', 'torch.arange', (['config.max_position_embeddings'], {}), '(config.max_position_embeddings)\n', (7907, 7939), False, 'import torch\n'), ((46408, 46443), 'torch.tensor', 'torch.tensor', (['loss_fct.ignore_index'], {}), '(loss_fct.ignore_index)\n', (46420, 46443), False, 'import torch\n'), ((51503, 51536), 'torch.sum', 'torch.sum', (['attention_mask'], {'dim': '(-1)'}), '(attention_mask, dim=-1)\n', (51512, 51536), False, 'import torch\n')] |
xming521/coco_API | api/routers/dashboard.py | 51d7ac3141e58f1d6a5438af135fba3ea101bd53 | import time
import psutil
import pymysql
from fastapi import APIRouter
from api.utils import response_code
router = APIRouter()
@router.get('/dashboard/getinfo')
def getinfo():
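    # Dashboard summary: total and running app counts from the app_list table,
    # image/network counts from the Docker client (g.dc), and the ten most
    # recently started apps.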
from init_global import g
res = {}
db = g.db_pool.connection()
cur = db.cursor()
cur.execute(f'select count(app_name) from app_list')
res['app_count'] = cur.fetchall()[0][0]
cur.execute(f'select count(app_name) from app_list where status="running"')
res['app_run_count'] = cur.fetchall()[0][0]
res['image_count'] = len(g.dc.images.list())
res['networks_count'] = len(g.dc.networks.list())
cur = db.cursor(cursor=pymysql.cursors.DictCursor)
cur.execute(f'select * from app_list order by start_time desc limit 10')
res['recent_event'] = cur.fetchall()
db.close()
return response_code.resp_200(data={"res": res})
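
# Host health snapshot used by the realtime dashboard: CPU, memory and disk
# utilisation collected with psutil and returned as a flat dict.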
def get_performance():
res = {}
    # CPU
    cpuCount = psutil.cpu_count(logical=False)  # physical core count
    cpuPercent = psutil.cpu_percent(0.5)  # usage (%) sampled over 0.5 s
    cpufree = round(100 - cpuPercent, 2)  # idle CPU (%)
    # Memory
    m = psutil.virtual_memory()  # memory statistics
    memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)  # total memory (GB)
    memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)  # used memory (GB)
    memoryFree = round(memoryTotal - memoryUsed, 2)  # free memory (GB)
    # Disk
    io = psutil.disk_partitions()
    diskCount = len(io)
    diskTotal = 0  # total disk space (GB)
    diskUsed = 0  # used (GB)
    diskFree = 0  # free (GB)
for i in io:
try:
o = psutil.disk_usage(i.mountpoint)
diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))
diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))
diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))
except:
pass
res['cpu'] = cpuPercent
res['mem'] = m.percent
    # Report overall disk usage from the aggregated totals instead of the last
    # partition's usage object (`o`), which is unbound if every probe failed.
    res['disk'] = round(diskUsed / diskTotal * 100, 2) if diskTotal else 0
res['memoryTotal'] = memoryTotal
res['memoryUsed'] = memoryUsed
res['diskTotal'] = diskTotal
res['diskUsed'] = diskUsed
return res
def push_realinfo():
from init_global import g
from main import socket_manager as sm
print(g.person_online)
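    # While at least one client is watching the dashboard, sample the metrics
    # every 3 seconds and emit them on the 'dashboard' event via the socket manager.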
while g.person_online:
res = get_performance()
# print(res)
g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))
time.sleep(3)
| [((119, 130), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (128, 130), False, 'from fastapi import APIRouter\n'), ((234, 256), 'init_global.g.db_pool.connection', 'g.db_pool.connection', ([], {}), '()\n', (254, 256), False, 'from init_global import g\n'), ((810, 851), 'api.utils.response_code.resp_200', 'response_code.resp_200', ([], {'data': "{'res': res}"}), "(data={'res': res})\n", (832, 851), False, 'from api.utils import response_code\n'), ((915, 946), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (931, 946), False, 'import psutil\n'), ((973, 996), 'psutil.cpu_percent', 'psutil.cpu_percent', (['(0.5)'], {}), '(0.5)\n', (991, 996), False, 'import psutil\n'), ((1071, 1094), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1092, 1094), False, 'import psutil\n'), ((1324, 1348), 'psutil.disk_partitions', 'psutil.disk_partitions', ([], {}), '()\n', (1346, 1348), False, 'import psutil\n'), ((537, 555), 'init_global.g.dc.images.list', 'g.dc.images.list', ([], {}), '()\n', (553, 555), False, 'from init_global import g\n'), ((589, 609), 'init_global.g.dc.networks.list', 'g.dc.networks.list', ([], {}), '()\n', (607, 609), False, 'from init_global import g\n'), ((2276, 2289), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2286, 2289), False, 'import time\n'), ((1494, 1525), 'psutil.disk_usage', 'psutil.disk_usage', (['i.mountpoint'], {}), '(i.mountpoint)\n', (1511, 1525), False, 'import psutil\n'), ((2231, 2266), 'main.socket_manager.emit', 'sm.emit', (['"""dashboard"""', "{'data': res}"], {}), "('dashboard', {'data': res})\n", (2238, 2266), True, 'from main import socket_manager as sm\n')] |
yujiatay/deep-motion-editing | retargeting/models/Kinematics.py | 0a6fc5fd20059c5074f68a452cd49cf6ede36ea8 | import torch
import torch.nn as nn
import numpy as np
import math
class ForwardKinematics:
def __init__(self, args, edges):
self.topology = [-1] * (len(edges) + 1)
self.rotation_map = []
for i, edge in enumerate(edges):
self.topology[edge[1]] = edge[0]
self.rotation_map.append(edge[1])
self.world = args.fk_world
self.pos_repr = args.pos_repr
self.quater = args.rotation == 'quaternion'
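    # Splits a raw network output into per-joint rotation channels and a root
    # position, inserts identity rotations for joints without a rotation channel
    # (e.g. the root), then runs forward kinematics on the result.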
def forward_from_raw(self, raw, offset, world=None, quater=None):
if world is None: world = self.world
if quater is None: quater = self.quater
if self.pos_repr == '3d':
position = raw[:, -3:, :]
rotation = raw[:, :-3, :]
elif self.pos_repr == '4d':
            raise Exception('Not supported')
if quater:
rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1]))
identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device)
else:
rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1]))
identity = torch.zeros((3, ), dtype=torch.float, device=raw.device)
identity = identity.reshape((1, 1, -1, 1))
new_shape = list(rotation.shape)
new_shape[1] += 1
new_shape[2] = 1
rotation_final = identity.repeat(new_shape)
for i, j in enumerate(self.rotation_map):
rotation_final[:, j, :, :] = rotation[:, i, :, :]
return self.forward(rotation_final, position, offset, world=world, quater=quater)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
output have shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True):
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
#norm[norm < 1e-10] = 1
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
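        # Walk the joints in topological order: compose each joint's local rotation
        # onto its parent's accumulated transform, place the joint by applying the
        # accumulated transform to its bone offset, and add the parent's position
        # when a world-space result is requested.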
for i, pi in enumerate(self.topology):
if pi == -1:
assert i == 0
continue
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze()
if world: result[..., i, :] += result[..., pi, :]
return result
def from_local_to_world(self, res: torch.Tensor):
res = res.clone()
for i, pi in enumerate(self.topology):
if pi == 0 or pi == -1:
continue
res[..., i, :] += res[..., pi, :]
return res
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
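        # Standard unit-quaternion to rotation-matrix formula, written with the
        # doubled products above, e.g. m[0,0] = 1 - 2*(qy^2 + qz^2) and
        # m[0,1] = 2*(qx*qy - qw*qz).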
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
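

# InverseKinematics fits per-joint rotations and a root trajectory to global
# joint-position constraints by gradient descent: forward() is a differentiable
# FK pass, and step() backpropagates an MSE loss between the reconstructed and
# target joint positions through it.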
class InverseKinematics:
def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains):
self.rotations = rotations
self.rotations.requires_grad_(True)
self.position = positions
self.position.requires_grad_(True)
self.parents = parents
self.offset = offset
self.constrains = constrains
self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999))
self.crit = nn.MSELoss()
def step(self):
self.optimizer.zero_grad()
glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True)
loss = self.crit(glb, self.constrains)
loss.backward()
self.optimizer.step()
self.glb = glb
return loss.item()
def tloss(self, time):
return self.crit(self.glb[time, :], self.constrains[time, :])
def all_loss(self):
res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])]
return np.array(res)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
output have shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False,
world=True):
'''
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
'''
result = torch.empty(rotation.shape[:-1] + (3,), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
for i, pi in enumerate(self.parents):
if pi == -1:
assert i == 0
continue
result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze()
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
if world: result[..., i, :] += result[..., pi, :]
return result
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
| [((2281, 2344), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (2292, 2344), False, 'import torch\n'), ((2363, 2405), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (2373, 2405), False, 'import torch\n'), ((3956, 4015), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (3967, 4015), False, 'import torch\n'), ((4030, 4046), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (4039, 4046), False, 'import torch\n'), ((4061, 4077), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (4070, 4077), False, 'import torch\n'), ((5192, 5253), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (5203, 5253), False, 'import torch\n'), ((5985, 6064), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.position, self.rotations]'], {'lr': '(0.001)', 'betas': '(0.9, 0.999)'}), '([self.position, self.rotations], lr=0.001, betas=(0.9, 0.999))\n', (6001, 6064), False, 'import torch\n'), ((6084, 6096), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6094, 6096), True, 'import torch.nn as nn\n'), ((6636, 6649), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6644, 6649), True, 'import numpy as np\n'), ((7394, 7457), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (7405, 7457), False, 'import torch\n'), ((7474, 7516), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (7484, 7516), False, 'import torch\n'), ((8780, 8839), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (8791, 8839), False, 'import torch\n'), ((8854, 8870), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (8863, 8870), False, 'import torch\n'), ((8885, 8901), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (8894, 8901), False, 'import torch\n'), ((10016, 10077), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (10027, 10077), False, 'import torch\n'), ((950, 1014), 'torch.tensor', 'torch.tensor', (['(1, 0, 0, 0)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((1, 0, 0, 0), dtype=torch.float, device=raw.device)\n', (962, 1014), False, 'import torch\n'), ((1140, 1195), 'torch.zeros', 'torch.zeros', (['(3,)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((3,), dtype=torch.float, device=raw.device)\n', (1151, 1195), False, 'import torch\n'), ((2925, 2996), 'torch.matmul', 'torch.matmul', (['transform[(...), (pi), :, :]', 'transform[(...), (i), :, :]'], {}), '(transform[(...), (pi), :, :], transform[(...), (i), :, :])\n', (2937, 2996), False, 'import torch\n'), ((8105, 8176), 'torch.matmul', 'torch.matmul', (['transform[(...), (pi), :, :]', 'transform[(...), (i), :, :]'], {}), '(transform[(...), (pi), :, :], transform[(...), (i), :, :])\n', (8117, 8176), False, 'import torch\n'), ((3021, 3088), 'torch.matmul', 'torch.matmul', (['transform[(...), (i), :, :]', 'offset[(...), (i), :, :]'], {}), '(transform[(...), (i), :, :], offset[(...), (i), :, :])\n', (3033, 
3088), False, 'import torch\n'), ((7996, 8064), 'torch.matmul', 'torch.matmul', (['transform[(...), (pi), :, :]', 'offset[(...), (i), :, :]'], {}), '(transform[(...), (pi), :, :], offset[(...), (i), :, :])\n', (8008, 8064), False, 'import torch\n')] |
Ryan-Miao/airflow | tests/operators/test_hive_operator.py | a2aca8714fac014ed7da97229d7877f1bc6e5a59 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import unittest
from unittest import mock
import nose
from airflow import DAG, configuration, operators
from airflow.models import TaskInstance
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class TestHiveEnvironment(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
class TestHiveCli(unittest.TestCase):
def setUp(self):
self.nondefault_schema = "nondefault"
os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos"
def tearDown(self):
del os.environ["AIRFLOW__CORE__SECURITY"]
def test_get_proxy_user_value(self):
from airflow.hooks.hive_hooks import HiveCliHook
hook = HiveCliHook()
returner = mock.MagicMock()
returner.extra_dejson = {'proxy_user': 'a_user_proxy'}
hook.use_beeline = True
hook.conn = returner
# Run
result = hook._prepare_cli_cmd()
# Verify
self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2])
class HiveOperatorConfigTest(TestHiveEnvironment):
def test_hive_airflow_default_config_queue(self):
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
# just check that the correct default value in test_default.cfg is used
test_config_hive_mapred_queue = configuration.conf.get(
'hive',
'default_hive_mapred_queue'
)
self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue)
def test_hive_airflow_default_config_queue_override(self):
specific_mapred_queue = 'default'
t = HiveOperator(
task_id='test_default_config_queue',
hql=self.hql,
mapred_queue=specific_mapred_queue,
mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_default_config_queue',
dag=self.dag)
self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue)
class HiveOperatorTest(TestHiveEnvironment):
def test_hiveconf_jinja_translate(self):
hql = "SELECT ${num_col} FROM ${hiveconf:table};"
t = HiveOperator(
hiveconf_jinja_translate=True,
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(t.hql, "SELECT {{ num_col }} FROM {{ table }};")
def test_hiveconf(self):
hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
t = HiveOperator(
hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'},
task_id='dry_run_basic_hql', hql=hql, dag=self.dag)
t.prepare_template()
self.assertEqual(
t.hql,
"SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});")
@mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook')
def test_mapred_job_name(self, mock_get_hook):
mock_hook = mock.MagicMock()
mock_get_hook.return_value = mock_hook
t = HiveOperator(
task_id='test_mapred_job_name',
hql=self.hql,
dag=self.dag)
fake_execution_date = timezone.datetime(2018, 6, 19)
fake_ti = TaskInstance(task=t, execution_date=fake_execution_date)
fake_ti.hostname = 'fake_hostname'
fake_context = {'ti': fake_ti}
t.execute(fake_context)
self.assertEqual(
"Airflow HiveOperator task for {}.{}.{}.{}"
.format(fake_ti.hostname,
self.dag.dag_id, t.task_id,
fake_execution_date.isoformat()), mock_hook.mapred_job_name)
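
# The tests below exercise live Hive, Presto, HDFS, Samba and MySQL services,
# so they are only defined when AIRFLOW_RUNALL_TESTS is set in the environment.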
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
class TestHivePresto(TestHiveEnvironment):
def test_hive(self):
t = HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_queues(self):
t = HiveOperator(
task_id='test_hive_queues', hql=self.hql,
mapred_queue='default', mapred_queue_priority='HIGH',
mapred_job_name='airflow.test_hive_queues',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_dryrun(self):
t = HiveOperator(
task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
t.dry_run()
def test_beeline(self):
t = HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='hive_cli_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
t = operators.presto_check_operator.PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_presto_to_mysql(self):
t = operators.presto_to_mysql.PrestoToMySqlTransfer(
task_id='presto_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hdfs_sensor(self):
t = operators.sensors.HdfsSensor(
task_id='hdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_webhdfs_sensor(self):
t = operators.sensors.WebHdfsSensor(
task_id='webhdfs_sensor_check',
filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
timeout=120,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_sql_sensor(self):
t = operators.sensors.SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_stats(self):
t = operators.hive_stats_operator.HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds={{ds}}"
],
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
partition="schema.table/part1=this.can.be.an.issue/part2=ok")
self.assertEqual(t[0], "schema")
self.assertEqual(t[1], "table")
            self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=ok")
@nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
t = operators.sensors.NamedHivePartitionSensor(
task_id='hive_partition_check',
partition_names=[
"airflow.static_babynames_partitioned/ds={{ds}}",
"airflow.static_babynames_partitioned/ds=nonexistent"
],
poke_interval=0.1,
timeout=1,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_partition_sensor(self):
t = operators.sensors.HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_metastore_sql_sensor(self):
t = operators.sensors.MetastorePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
partition_name='ds={}'.format(DEFAULT_DATE_DS),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive2samba(self):
t = operators.hive_to_samba_operator.Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_hive_to_mysql(self):
t = operators.hive_to_mysql.HiveToMySqlTransfer(
mysql_conn_id='airflow_db',
task_id='hive_to_mysql_check',
create=True,
sql="""
SELECT name
FROM airflow.static_babynames
LIMIT 100
""",
mysql_table='test_static_babynames',
mysql_preoperator=[
'DROP TABLE IF EXISTS test_static_babynames;',
'CREATE TABLE test_static_babynames (name VARCHAR(500))',
],
dag=self.dag)
t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
| [((1093, 1122), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(1)', '(1)'], {}), '(2015, 1, 1)\n', (1110, 1122), False, 'import datetime\n'), ((4517, 4584), 'unittest.mock.patch', 'mock.patch', (['"""airflow.operators.hive_operator.HiveOperator.get_hook"""'], {}), "('airflow.operators.hive_operator.HiveOperator.get_hook')\n", (4527, 4584), False, 'from unittest import mock\n'), ((1355, 1392), 'airflow.DAG', 'DAG', (['"""test_dag_id"""'], {'default_args': 'args'}), "('test_dag_id', default_args=args)\n", (1358, 1392), False, 'from airflow import DAG, configuration, operators\n'), ((2280, 2293), 'airflow.hooks.hive_hooks.HiveCliHook', 'HiveCliHook', ([], {}), '()\n', (2291, 2293), False, 'from airflow.hooks.hive_hooks import HiveCliHook\n'), ((2313, 2329), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2327, 2329), False, 'from unittest import mock\n'), ((2721, 2890), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""test_default_config_queue"""', 'hql': 'self.hql', 'mapred_queue_priority': '"""HIGH"""', 'mapred_job_name': '"""airflow.test_default_config_queue"""', 'dag': 'self.dag'}), "(task_id='test_default_config_queue', hql=self.hql,\n mapred_queue_priority='HIGH', mapred_job_name=\n 'airflow.test_default_config_queue', dag=self.dag)\n", (2733, 2890), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((3064, 3123), 'airflow.configuration.conf.get', 'configuration.conf.get', (['"""hive"""', '"""default_hive_mapred_queue"""'], {}), "('hive', 'default_hive_mapred_queue')\n", (3086, 3123), False, 'from airflow import DAG, configuration, operators\n'), ((3359, 3563), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""test_default_config_queue"""', 'hql': 'self.hql', 'mapred_queue': 'specific_mapred_queue', 'mapred_queue_priority': '"""HIGH"""', 'mapred_job_name': '"""airflow.test_default_config_queue"""', 'dag': 'self.dag'}), "(task_id='test_default_config_queue', hql=self.hql,\n mapred_queue=specific_mapred_queue, mapred_queue_priority='HIGH',\n mapred_job_name='airflow.test_default_config_queue', dag=self.dag)\n", (3371, 3563), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((3868, 3967), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'hiveconf_jinja_translate': '(True)', 'task_id': '"""dry_run_basic_hql"""', 'hql': 'hql', 'dag': 'self.dag'}), "(hiveconf_jinja_translate=True, task_id='dry_run_basic_hql',\n hql=hql, dag=self.dag)\n", (3880, 3967), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((4211, 4339), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'hiveconfs': "{'table': 'static_babynames', 'day': '{{ ds }}'}", 'task_id': '"""dry_run_basic_hql"""', 'hql': 'hql', 'dag': 'self.dag'}), "(hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'},\n task_id='dry_run_basic_hql', hql=hql, dag=self.dag)\n", (4223, 4339), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((4656, 4672), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4670, 4672), False, 'from unittest import mock\n'), ((4732, 4804), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""test_mapred_job_name"""', 'hql': 'self.hql', 'dag': 'self.dag'}), "(task_id='test_mapred_job_name', hql=self.hql, dag=self.dag)\n", (4744, 4804), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((4873, 4903), 'airflow.utils.timezone.datetime', 
'timezone.datetime', (['(2018)', '(6)', '(19)'], {}), '(2018, 6, 19)\n', (4890, 4903), False, 'from airflow.utils import timezone\n'), ((4922, 4978), 'airflow.models.TaskInstance', 'TaskInstance', ([], {'task': 't', 'execution_date': 'fake_execution_date'}), '(task=t, execution_date=fake_execution_date)\n', (4934, 4978), False, 'from airflow.models import TaskInstance\n'), ((10442, 10500), 'nose.tools.raises', 'nose.tools.raises', (['airflow.exceptions.AirflowSensorTimeout'], {}), '(airflow.exceptions.AirflowSensorTimeout)\n', (10459, 10500), False, 'import nose\n'), ((5562, 5623), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""basic_hql"""', 'hql': 'self.hql', 'dag': 'self.dag'}), "(task_id='basic_hql', hql=self.hql, dag=self.dag)\n", (5574, 5623), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((5800, 5976), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""test_hive_queues"""', 'hql': 'self.hql', 'mapred_queue': '"""default"""', 'mapred_queue_priority': '"""HIGH"""', 'mapred_job_name': '"""airflow.test_hive_queues"""', 'dag': 'self.dag'}), "(task_id='test_hive_queues', hql=self.hql, mapred_queue=\n 'default', mapred_queue_priority='HIGH', mapred_job_name=\n 'airflow.test_hive_queues', dag=self.dag)\n", (5812, 5976), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((6191, 6260), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""dry_run_basic_hql"""', 'hql': 'self.hql', 'dag': 'self.dag'}), "(task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)\n", (6203, 6260), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((6351, 6455), 'airflow.operators.hive_operator.HiveOperator', 'HiveOperator', ([], {'task_id': '"""beeline_hql"""', 'hive_cli_conn_id': '"""hive_cli_default"""', 'hql': 'self.hql', 'dag': 'self.dag'}), "(task_id='beeline_hql', hive_cli_conn_id='hive_cli_default',\n hql=self.hql, dag=self.dag)\n", (6363, 6455), False, 'from airflow.operators.hive_operator import HiveOperator\n'), ((6748, 6850), 'airflow.operators.presto_check_operator.PrestoCheckOperator', 'operators.presto_check_operator.PrestoCheckOperator', ([], {'task_id': '"""presto_check"""', 'sql': 'sql', 'dag': 'self.dag'}), "(task_id='presto_check',\n sql=sql, dag=self.dag)\n", (6799, 6850), False, 'from airflow import DAG, configuration, operators\n'), ((7027, 7390), 'airflow.operators.presto_to_mysql.PrestoToMySqlTransfer', 'operators.presto_to_mysql.PrestoToMySqlTransfer', ([], {'task_id': '"""presto_to_mysql_check"""', 'sql': '"""\n SELECT name, count(*) as ccount\n FROM airflow.static_babynames\n GROUP BY name\n """', 'mysql_table': '"""test_static_babynames"""', 'mysql_preoperator': '"""TRUNCATE TABLE test_static_babynames;"""', 'dag': 'self.dag'}), '(task_id=\n \'presto_to_mysql_check\', sql=\n """\n SELECT name, count(*) as ccount\n FROM airflow.static_babynames\n GROUP BY name\n """\n , mysql_table=\'test_static_babynames\', mysql_preoperator=\n \'TRUNCATE TABLE test_static_babynames;\', dag=self.dag)\n', (7074, 7390), False, 'from airflow import DAG, configuration, operators\n'), ((7611, 7754), 'airflow.operators.sensors.HdfsSensor', 'operators.sensors.HdfsSensor', ([], {'task_id': '"""hdfs_sensor_check"""', 'filepath': '"""hdfs://user/hive/warehouse/airflow.db/static_babynames"""', 'dag': 'self.dag'}), "(task_id='hdfs_sensor_check', filepath=\n 'hdfs://user/hive/warehouse/airflow.db/static_babynames', dag=self.dag)\n", (7639, 7754), 
False, 'from airflow import DAG, configuration, operators\n'), ((7961, 8127), 'airflow.operators.sensors.WebHdfsSensor', 'operators.sensors.WebHdfsSensor', ([], {'task_id': '"""webhdfs_sensor_check"""', 'filepath': '"""hdfs://user/hive/warehouse/airflow.db/static_babynames"""', 'timeout': '(120)', 'dag': 'self.dag'}), "(task_id='webhdfs_sensor_check', filepath=\n 'hdfs://user/hive/warehouse/airflow.db/static_babynames', timeout=120,\n dag=self.dag)\n", (7992, 8127), False, 'from airflow import DAG, configuration, operators\n'), ((8342, 8505), 'airflow.operators.sensors.SqlSensor', 'operators.sensors.SqlSensor', ([], {'task_id': '"""hdfs_sensor_check"""', 'conn_id': '"""presto_default"""', 'sql': '"""SELECT \'x\' FROM airflow.static_babynames LIMIT 1;"""', 'dag': 'self.dag'}), '(task_id=\'hdfs_sensor_check\', conn_id=\n \'presto_default\', sql=\n "SELECT \'x\' FROM airflow.static_babynames LIMIT 1;", dag=self.dag)\n', (8369, 8505), False, 'from airflow import DAG, configuration, operators\n'), ((8719, 8908), 'airflow.operators.hive_stats_operator.HiveStatsCollectionOperator', 'operators.hive_stats_operator.HiveStatsCollectionOperator', ([], {'task_id': '"""hive_stats_check"""', 'table': '"""airflow.static_babynames_partitioned"""', 'partition': "{'ds': DEFAULT_DATE_DS}", 'dag': 'self.dag'}), "(task_id=\n 'hive_stats_check', table='airflow.static_babynames_partitioned',\n partition={'ds': DEFAULT_DATE_DS}, dag=self.dag)\n", (8776, 8908), False, 'from airflow import DAG, configuration, operators\n'), ((9140, 9305), 'airflow.operators.sensors.NamedHivePartitionSensor', 'operators.sensors.NamedHivePartitionSensor', ([], {'task_id': '"""hive_partition_check"""', 'partition_names': "['airflow.static_babynames_partitioned/ds={{ds}}']", 'dag': 'self.dag'}), "(task_id='hive_partition_check',\n partition_names=['airflow.static_babynames_partitioned/ds={{ds}}'], dag\n =self.dag)\n", (9182, 9305), False, 'from airflow import DAG, configuration, operators\n'), ((9591, 9805), 'airflow.operators.sensors.NamedHivePartitionSensor', 'operators.sensors.NamedHivePartitionSensor', ([], {'task_id': '"""hive_partition_check"""', 'partition_names': "['airflow.static_babynames_partitioned/ds={{ds}}',\n 'airflow.static_babynames_partitioned/ds={{ds}}']", 'dag': 'self.dag'}), "(task_id='hive_partition_check',\n partition_names=['airflow.static_babynames_partitioned/ds={{ds}}',\n 'airflow.static_babynames_partitioned/ds={{ds}}'], dag=self.dag)\n", (9633, 9805), False, 'from airflow import DAG, configuration, operators\n'), ((10111, 10241), 'airflow.operators.sensors.NamedHivePartitionSensor.parse_partition_name', 'operators.sensors.NamedHivePartitionSensor.parse_partition_name', ([], {'partition': '"""schema.table/part1=this.can.be.an.issue/part2=ok"""'}), "(partition=\n 'schema.table/part1=this.can.be.an.issue/part2=ok')\n", (10174, 10241), False, 'from airflow import DAG, configuration, operators\n'), ((10604, 10858), 'airflow.operators.sensors.NamedHivePartitionSensor', 'operators.sensors.NamedHivePartitionSensor', ([], {'task_id': '"""hive_partition_check"""', 'partition_names': "['airflow.static_babynames_partitioned/ds={{ds}}',\n 'airflow.static_babynames_partitioned/ds=nonexistent']", 'poke_interval': '(0.1)', 'timeout': '(1)', 'dag': 'self.dag'}), "(task_id='hive_partition_check',\n partition_names=['airflow.static_babynames_partitioned/ds={{ds}}',\n 'airflow.static_babynames_partitioned/ds=nonexistent'], poke_interval=\n 0.1, timeout=1, dag=self.dag)\n", (10646, 10858), False, 'from airflow import DAG, 
configuration, operators\n'), ((11154, 11288), 'airflow.operators.sensors.HivePartitionSensor', 'operators.sensors.HivePartitionSensor', ([], {'task_id': '"""hive_partition_check"""', 'table': '"""airflow.static_babynames_partitioned"""', 'dag': 'self.dag'}), "(task_id='hive_partition_check', table\n ='airflow.static_babynames_partitioned', dag=self.dag)\n", (11191, 11288), False, 'from airflow import DAG, configuration, operators\n'), ((11912, 12149), 'airflow.operators.hive_to_samba_operator.Hive2SambaOperator', 'operators.hive_to_samba_operator.Hive2SambaOperator', ([], {'task_id': '"""hive2samba_check"""', 'samba_conn_id': '"""tableau_samba"""', 'hql': '"""SELECT * FROM airflow.static_babynames LIMIT 10000"""', 'destination_filepath': '"""test_airflow.csv"""', 'dag': 'self.dag'}), "(task_id=\n 'hive2samba_check', samba_conn_id='tableau_samba', hql=\n 'SELECT * FROM airflow.static_babynames LIMIT 10000',\n destination_filepath='test_airflow.csv', dag=self.dag)\n", (11963, 12149), False, 'from airflow import DAG, configuration, operators\n'), ((12378, 12821), 'airflow.operators.hive_to_mysql.HiveToMySqlTransfer', 'operators.hive_to_mysql.HiveToMySqlTransfer', ([], {'mysql_conn_id': '"""airflow_db"""', 'task_id': '"""hive_to_mysql_check"""', 'create': '(True)', 'sql': '"""\n SELECT name\n FROM airflow.static_babynames\n LIMIT 100\n """', 'mysql_table': '"""test_static_babynames"""', 'mysql_preoperator': "['DROP TABLE IF EXISTS test_static_babynames;',\n 'CREATE TABLE test_static_babynames (name VARCHAR(500))']", 'dag': 'self.dag'}), '(mysql_conn_id=\'airflow_db\',\n task_id=\'hive_to_mysql_check\', create=True, sql=\n """\n SELECT name\n FROM airflow.static_babynames\n LIMIT 100\n """\n , mysql_table=\'test_static_babynames\', mysql_preoperator=[\n \'DROP TABLE IF EXISTS test_static_babynames;\',\n \'CREATE TABLE test_static_babynames (name VARCHAR(500))\'], dag=self.dag)\n', (12421, 12821), False, 'from airflow import DAG, configuration, operators\n')] |
OrionDark7/Alakajam12 | main.py | 4f9f8f87a05feb718baddb12aa8cbbed3e36a071 | import pygame, math
from game import map, ui
window = pygame.display.set_mode([800, 600])
ui.window = window
screen = "game"
s = {"fullscreen": False}
running = True
gamedata = {"level": 0, "coal": 0, "iron": 1, "copper":0}
tiles = pygame.sprite.Group()
rails = pygame.sprite.Group()
carts = pygame.sprite.Group()
interactables = pygame.sprite.Group()
listmap = []
clock = pygame.time.Clock()
selected = pygame.image.load("./resources/images/selected.png")
selected2 = pygame.image.load("./resources/images/selected2.png")
box = pygame.image.load("./resources/images/box.png")
uibox = pygame.image.load("./resources/images/ui box.png")
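
# A 1x1 sprite that follows the cursor so pygame's sprite-collision helpers can
# be reused for hover and click detection; it also remembers the selected cart
# and the current interaction mode ("select" or "action").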
class Mouse(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.surface.Surface([1, 1])
self.rect = self.image.get_rect()
self.rect.topleft = [0, 0]
self.clickedcart = None
self.hoveritem = None
self.tl = self.rect.topleft
self.mode = "select"
def pos(self, position):
self.rect.topleft = position
self.tl = self.rect.topleft
m = Mouse()
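
# Convert a pixel position into 40x40-pixel tile-grid coordinates (column, row).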
def snaptogrid(pos):
return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))]
def loadlevel(number):
global tiles, rails, carts, gamedata, listmap, interactables
tiles, rails, interactables, listmap = map.loadmap(int(number))
carts.empty()
gamedata["level"] = number
gamedata["coal"] = 0
gamedata["iron"] = 1
gamedata["copper"] = 0
loadlevel(0)
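
# Main loop: handle mouse and keyboard input, update the carts, then draw the
# map, carts, hover tooltips and the tile-selection highlight.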
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
m.pos(pygame.mouse.get_pos())
if screen == "game":
if pygame.sprite.spritecollide(m, carts, False) and m.mode == "select":
carts.update("select", m, listmap)
if m.clickedcart != None:
m.mode = "action"
elif m.mode == "action" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
m.clickedcart.pathfind(listmap, snaptogrid(m.tl))
m.clickedcart = None
m.mode = "select"
elif event.type == pygame.MOUSEMOTION:
m.pos(pygame.mouse.get_pos())
if screen == "game":
m.hoveritem = None
if len(pygame.sprite.spritecollide(m, carts, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0]
elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0]
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
carts.add(map.Cart(snaptogrid(m.tl), "miner"))
if screen == "game":
window.fill([100, 100, 100])
tiles.draw(window)
carts.draw(window)
carts.update("update", m, listmap)
if not m.hoveritem == None and not m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
if m.hoveritem.type.startswith("mine") and m.hoveritem not in carts:
ui.Resize(18)
ui.Text("Carts Inside: " + str(m.hoveritem.data["carts"]), [m.rect.left+27, m.rect.top+47])
ui.Text("Max Carts: " + str(m.hoveritem.data["max"]), [m.rect.left+27, m.rect.top+60])
if not m.clickedcart == None:
window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2])
if m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
try:
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
except:
ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25])
if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
ui.Resize(22)
ui.Text("Click to move", [m.rect.left+27, m.rect.top+45])
ui.Text("Cart Here", [m.rect.left+27, m.rect.top+60])
window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2])
window.blit(uibox, [555, 475])
pygame.display.flip()
clock.tick(60)
fps = clock.get_fps()
pygame.quit()
| [((55, 90), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[800, 600]'], {}), '([800, 600])\n', (78, 90), False, 'import pygame, math\n'), ((233, 254), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (252, 254), False, 'import pygame, math\n'), ((263, 284), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (282, 284), False, 'import pygame, math\n'), ((293, 314), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (312, 314), False, 'import pygame, math\n'), ((331, 352), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (350, 352), False, 'import pygame, math\n'), ((374, 393), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (391, 393), False, 'import pygame, math\n'), ((405, 457), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/selected.png"""'], {}), "('./resources/images/selected.png')\n", (422, 457), False, 'import pygame, math\n'), ((470, 523), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/selected2.png"""'], {}), "('./resources/images/selected2.png')\n", (487, 523), False, 'import pygame, math\n'), ((530, 577), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/box.png"""'], {}), "('./resources/images/box.png')\n", (547, 577), False, 'import pygame, math\n'), ((586, 636), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/ui box.png"""'], {}), "('./resources/images/ui box.png')\n", (603, 636), False, 'import pygame, math\n'), ((4539, 4552), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4550, 4552), False, 'import pygame, math\n'), ((1540, 1558), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1556, 1558), False, 'import pygame, math\n'), ((4472, 4493), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4491, 4493), False, 'import pygame, math\n'), ((705, 740), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (734, 740), False, 'import pygame, math\n'), ((762, 792), 'pygame.surface.Surface', 'pygame.surface.Surface', (['[1, 1]'], {}), '([1, 1])\n', (784, 792), False, 'import pygame, math\n'), ((1153, 1176), 'math.floor', 'math.floor', (['(pos[0] / 40)'], {}), '(pos[0] / 40)\n', (1163, 1176), False, 'import pygame, math\n'), ((1183, 1206), 'math.floor', 'math.floor', (['(pos[1] / 40)'], {}), '(pos[1] / 40)\n', (1193, 1206), False, 'import pygame, math\n'), ((3190, 3203), 'game.ui.Resize', 'ui.Resize', (['(30)'], {}), '(30)\n', (3199, 3203), False, 'from game import map, ui\n'), ((3380, 3393), 'game.ui.Resize', 'ui.Resize', (['(18)'], {}), '(18)\n', (3389, 3393), False, 'from game import map, ui\n'), ((3850, 3863), 'game.ui.Resize', 'ui.Resize', (['(30)'], {}), '(30)\n', (3859, 3863), False, 'from game import map, ui\n'), ((1695, 1717), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1715, 1717), False, 'import pygame, math\n'), ((4179, 4192), 'game.ui.Resize', 'ui.Resize', (['(22)'], {}), '(22)\n', (4188, 4192), False, 'from game import map, ui\n'), ((4213, 4274), 'game.ui.Text', 'ui.Text', (['"""Click to move"""', '[m.rect.left + 27, m.rect.top + 45]'], {}), "('Click to move', [m.rect.left + 27, m.rect.top + 45])\n", (4220, 4274), False, 'from game import map, ui\n'), ((4291, 4348), 'game.ui.Text', 'ui.Text', (['"""Cart Here"""', '[m.rect.left + 27, m.rect.top + 60]'], {}), "('Cart Here', [m.rect.left + 27, m.rect.top + 60])\n", (4298, 4348), False, 'from game import map, ui\n'), ((1771, 1815), 'pygame.sprite.spritecollide', 
'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (1798, 1815), False, 'import pygame, math\n'), ((2322, 2344), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2342, 2344), False, 'import pygame, math\n'), ((2437, 2481), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (2464, 2481), False, 'import pygame, math\n'), ((2522, 2566), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (2549, 2566), False, 'import pygame, math\n'), ((2595, 2647), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'interactables', '(False)'], {}), '(m, interactables, False)\n', (2622, 2647), False, 'import pygame, math\n'), ((2688, 2740), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'interactables', '(False)'], {}), '(m, interactables, False)\n', (2715, 2740), False, 'import pygame, math\n')] |
AbdullahNoori/CS-2.1-Trees-Sorting | Code/extract_method3.py | 59ba182d60abe6171a3d7d64981f79ee192de3bb | # Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
import math
def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):
    # Calculate the distance between the centers of the two circles.
return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2)
print('distance', get_distance())
# *** somewhere else in your program ***
def get_length(xa=-50, ya=99, xb=.67, yb=.26):
    # Calculate the length of vector AB, the vector from point A to point B.
return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb))
print('length', get_length())
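# Editor's sketch (hypothetical; not part of the original exercise): both
# functions above duplicate the Euclidean distance formula, so the Extract
# Method refactoring would pull it into one shared helper that get_distance
# and get_length can delegate to.
def distance_between_points(x1, y1, x2, y2):
    # Shared formula extracted from get_distance and get_length.
    return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)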
| [((206, 252), 'math.sqrt', 'math.sqrt', (['((xc1 - xc2) ** 2 + (yc1 - yc2) ** 2)'], {}), '((xc1 - xc2) ** 2 + (yc1 - yc2) ** 2)\n', (215, 252), False, 'import math\n'), ((473, 529), 'math.sqrt', 'math.sqrt', (['((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb))'], {}), '((xa - xb) * (xa - xb) + (ya - yb) * (ya - yb))\n', (482, 529), False, 'import math\n')] |
Abhi58/sympy | sympy/integrals/prde.py | 5ca228b17a7d44ef08a268ba1fa959d5763634af | """
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
from __future__ import print_function, division
from sympy.core import Dummy, ilcm, Add, Mul, Pow, S
from sympy.core.compatibility import reduce, range
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
bound_degree)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation,
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,
recognize_log_derivative)
from sympy.matrices import zeros, eye
from sympy.polys import Poly, lcm, cancel, sqf_list
from sympy.polys.polymatrix import PolyMatrix as Matrix
from sympy.solvers import solve
def prde_normal_denom(fa, fd, G, DE):
"""
Parametric Risch Differential Equation - Normal part of the denominator.
Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
normalized with respect to t, return the tuple (a, b, G, h) such that
a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
"""
dn, ds = splitfactor(fd, DE)
Gas, Gds = list(zip(*G))
gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
en, es = splitfactor(gd, DE)
p = dn.gcd(en)
h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
a = dn*h
c = a*h
ba = a*fa - dn*derivation(h, DE)*fd
ba, bd = ba.cancel(fd, include=True)
G = [(c*A).cancel(D, include=True) for A, D in G]
return (a, (ba, bd), G, h)
def real_imag(ba, bd, gen):
"""
Helper function, to get the real and imaginary part of a rational function
evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)
    Separates the even and odd power terms by checking the degree of each
    term modulo 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real
    part of the numerator, ba[1] is the imaginary part, and bd is the
    denominator of the rational function.
"""
bd = bd.as_poly(gen).as_dict()
ba = ba.as_poly(gen).as_dict()
denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()]
denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()]
bd_real = sum(r for r in denom_real)
bd_imag = sum(r for r in denom_imag)
num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()]
num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()]
ba_real = sum(r for r in num_real)
ba_imag = sum(r for r in num_imag)
ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
return (ba[0], ba[1], bd)
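# Worked illustration (added; not part of the original source): for
# b(t) = t**3 + 2*t**2 + 3*t + 4, substituting t = sqrt(-1) gives
# (4 - 2) + (3 - 1)*I == 2 + 2*I.  The real part collects the coefficients of
# degrees congruent to 0 (kept) and 2 (negated) mod 4, and the imaginary part
# those of degrees 1 (kept) and 3 (negated) mod 4, which is exactly the sign
# pattern used in the comprehensions above.  A quick standalone check:
#
#     >>> from sympy import I, symbols
#     >>> t = symbols('t')
#     >>> (t**3 + 2*t**2 + 3*t + 4).subs(t, I)
#     2 + 2*I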
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
"""
Parametric Risch Differential Equation - Special part of the denominator.
case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
hypertangent, and primitive cases, respectively. For the hyperexponential
(resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).
For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
case.
"""
# TODO: Merge this with the very similar special_denom() in rde.py
if case == 'auto':
case = DE.case
if case == 'exp':
p = Poly(DE.t, DE.t)
elif case == 'tan':
p = Poly(DE.t**2 + 1, DE.t)
elif case in ['primitive', 'base']:
B = ba.quo(bd)
return (a, B, G, Poly(1, DE.t))
else:
raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
"'base'}, not %s." % case)
nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
n = min(0, nc - min(0, nb))
if not nb:
# Possible cancellation.
if case == 'exp':
dcoeff = DE.d.quo(Poly(DE.t, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
etaa, etad = frac_in(dcoeff, DE.t)
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
if A is not None:
Q, m, z = A
if Q == 1:
n = min(n, m)
elif case == 'tan':
dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
betad = alphad
etaa, etad = frac_in(dcoeff, DE.t)
if recognize_log_derivative(2*betaa, betad, DE):
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
if A is not None and B is not None:
Q, s, z = A
# TODO: Add test
if Q == 1:
n = min(n, s/2)
N = max(0, -nb)
pN = p**N
pn = p**-n # This is 1/h
A = a*pN
B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
h = pn
# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
return (A, B, G, h)
def prde_linear_constraints(a, b, G, DE):
"""
Parametric Risch Differential Equation - Generate linear constraints on the constants.
Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
matrix M with entries in k(t) such that for any solution c1, ..., cm in
Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
(c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
Because M has entries in k(t), and because Matrix doesn't play well with
Poly, M will be a Matrix of Basic expressions.
"""
m = len(G)
Gns, Gds = list(zip(*G))
d = reduce(lambda i, j: i.lcm(j), Gds)
d = Poly(d, field=True)
Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]
if not all([ri.is_zero for _, ri in Q]):
N = max([ri.degree(DE.t) for _, ri in Q])
M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
else:
M = Matrix(0, m, []) # No constraints, return the empty matrix.
qs, _ = list(zip(*Q))
return (qs, M)
def poly_linear_constraints(p, d):
"""
Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such
that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible
by d if and only if (c1, ..., cm) is a solution of Mx = 0, in
which case the quotient is Sum(ci*qi, (i, 1, m)).
"""
m = len(p)
q, r = zip(*[pi.div(d) for pi in p])
if not all([ri.is_zero for ri in r]):
n = max([ri.degree() for ri in r])
M = Matrix(n + 1, m, lambda i, j: r[j].nth(i))
else:
M = Matrix(0, m, []) # No constraints.
return q, M
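# Worked example (added comment): with p == [x, 1] and d == x, division gives
# quotients q == [1, 0] and remainders [0, 1], so M consists of the single
# row [0, 1]; c1*x + c2 is divisible by x exactly when c2 == 0, in which case
# the quotient is c1*1 + c2*0 == c1.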
def constant_system(A, u, DE):
"""
Generate a system for the constant solutions.
Given a differential field (K, D) with constant field C = Const(K), a Matrix
A, and a vector (Matrix) u with coefficients in K, returns the tuple
(B, v, s), where B is a Matrix with coefficients in C and v is a vector
(Matrix) such that either v has coefficients in C, in which case s is True
and the solutions in C of Ax == u are exactly all the solutions of Bx == v,
or v has a non-constant coefficient, in which case s is False Ax == u has no
constant solution.
This algorithm is used both in solving parametric problems and in
determining if an element a of K is a derivative of an element of K or the
logarithmic derivative of a K-radical using the structure theorem approach.
Because Poly does not play well with Matrix yet, this algorithm assumes that
all matrix entries are Basic expressions.
"""
if not A:
return A, u
Au = A.row_join(u)
Au = Au.rref(simplify=cancel, normalize_last=False)[0]
# Warning: This will NOT return correct results if cancel() cannot reduce
# an identically zero expression to 0. The danger is that we might
# incorrectly prove that an integral is nonelementary (such as
# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
# But this is a limitation in computer algebra in general, and implicit
# in the correctness of the Risch Algorithm is the computability of the
# constant field (actually, this same correctness problem exists in any
# algorithm that uses rref()).
#
# We therefore limit ourselves to constant fields that are computable
# via the cancel() function, in order to prevent a speed bottleneck from
# calling some more complex simplification function (rational function
# coefficients will fall into this class). Furthermore, (I believe) this
# problem will only crop up if the integral explicitly contains an
# expression in the constant field that is identically zero, but cannot
# be reduced to such by cancel(). Therefore, a careful user can avoid this
# problem entirely by being careful with the sorts of expressions that
# appear in his integrand in the variables other than the integration
# variable (the structure theorems should be able to completely decide these
# problems in the integration variable).
Au = Au.applyfunc(cancel)
A, u = Au[:, :-1], Au[:, -1]
for j in range(A.cols):
for i in range(A.rows):
if A[i, j].has(*DE.T):
                # This assumes that const(F(t0, ..., tn)) == const(K) == F
Ri = A[i, :]
# Rm+1; m = A.rows
Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
derivation(A[i, j], DE, basic=True))
Rm1 = Rm1.applyfunc(cancel)
um1 = cancel(derivation(u[i], DE, basic=True)/
derivation(A[i, j], DE, basic=True))
for s in range(A.rows):
# A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
Asj = A[s, j]
A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1]
u.row_op(s, lambda r, jj: cancel(r - Asj*um1))
A = A.col_join(Rm1)
u = u.col_join(Matrix([um1]))
return (A, u)
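# Illustrative note (added): over QQ(x) with D == d/dx, the single equation
# c1 + x*c2 == 0 (A == Matrix([[1, x]]), u == Matrix([0])) can only hold for
# constants when c1 == c2 == 0.  constant_system detects the x-dependent
# entry, differentiates that row to derive the extra constraint c2 == 0, and
# returns an equivalent system with constant coefficients encoding exactly
# that.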
def prde_spde(a, b, Q, n, DE):
"""
Special Polynomial Differential Equation algorithm: Parametric Version.
Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
c1, ..., cm in Const(k) and q in k[t] of degree at most n of
a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
"""
R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))
A = a
B = b + derivation(a, DE)
Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
R = list(R)
n1 = n - a.degree(DE.t)
return (A, B, Qq, R, n1)
def prde_no_cancel_b_large(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, -1, -1): # [n, ..., 0]
for i in range(m):
si = Q[i].nth(N + db)/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2)
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
def prde_no_cancel_b_small(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, 0, -1): # [n, ..., 1]
for i in range(m):
si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if b.degree(DE.t) > 0:
for i in range(m):
si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
H[i] = H[i] + si
Q[i] = Q[i] - derivation(si, DE) - b*si
if all(qi.is_zero for qi in Q):
dc = -1
M = Matrix()
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
# else: b is in k, deg(qi) < deg(Dt)
t = DE.t
if DE.case != 'base':
with DecrementLevel(DE):
t0 = DE.t # k = k0(t0)
ba, bd = frac_in(b, t0, field=True)
Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
f, B = param_rischDE(ba, bd, Q0, DE)
# f = [f1, ..., fr] in k^r and B is a matrix with
# m + r columns and entries in Const(k) = Const(k0)
# such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
# a solution y0 in k with c1, ..., cm in Const(k)
            # if and only if y0 = Sum(dj*fj, (j, 1, r)) where
            # d1, ..., dr are in Const(k) and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.
# Transform fractions (fa, fd) in f into constant
# polynomials fa/fd in k[t].
# (Is there a better way?)
f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
for fa, fd in f]
else:
# Base case. Dy == 0 for all y in k and b == 0.
# Dy + b*y = Sum(ci*qi) is solvable if and only if
# Sum(ci*qi) == 0 in which case the solutions are
# y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.
f = [Poly(1, t, field=True)] # r = 1
B = Matrix([[qi.TC() for qi in Q] + [S(0)]])
# The condition for solvability is
# B*Matrix([c1, ..., cm, d1]) == 0
# There are no constraints on d1.
# Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
d = max([qi.degree(DE.t) for qi in Q])
if d > 0:
M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
A, _ = constant_system(M, zeros(d, 1), DE)
else:
# No constraints on the hj.
A = Matrix(0, m, [])
# Solutions of the original equation are
# y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)),
# where ei == ci (i = 1, ..., m), when
# A*Matrix([c1, ..., cm]) == 0 and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0
# Build combined constraint matrix with m + r + m columns.
r = len(f)
I = eye(m)
A = A.row_join(zeros(A.rows, r + m))
B = B.row_join(zeros(B.rows, m))
C = I.row_join(zeros(m, r)).row_join(-I)
return f + H, A.col_join(B).col_join(C)
def prde_cancel_liouvillian(b, Q, n, DE):
"""
Pg, 237.
"""
H = []
# Why use DecrementLevel? Below line answers that:
# Assuming that we can solve such problems over 'k' (not k[t])
if DE.case == 'primitive':
with DecrementLevel(DE):
ba, bd = frac_in(b, DE.t, field=True)
for i in range(n, -1, -1):
if DE.case == 'exp': # this re-checking can be avoided
with DecrementLevel(DE):
ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
DE.t, field=True)
with DecrementLevel(DE):
Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
fi, Ai = param_rischDE(ba, bd, Qy, DE)
fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
for fa, fd in fi]
ri = len(fi)
if i == n:
M = Ai
else:
M = Ai.col_join(M.row_join(zeros(M.rows, ri)))
Fi, hi = [None]*ri, [None]*ri
# from eq. on top of p.238 (unnumbered)
for j in range(ri):
hji = fi[j]*DE.t**i
hi[j] = hji
# building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))
Fi[j] = -(derivation(hji, DE) - b*hji)
H += hi
# in the next loop instead of Q it has
# to be Q + Fi taking its place
Q = Q + Fi
return (H, M)
def param_poly_rischDE(a, b, q, n, DE):
"""Polynomial solutions of a parametric Risch differential equation.
Given a derivation D in k[t], a, b in k[t] relatively prime, and q
= [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and
a matrix A with m + r columns and entries in Const(k) such that
a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n
in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
"""
m = len(q)
if n < 0:
# Only the trivial zero solution is possible.
# Find relations between the qi.
if all([qi.is_zero for qi in q]):
return [], zeros(1, m) # No constraints.
N = max([qi.degree(DE.t) for qi in q])
M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
A, _ = constant_system(M, zeros(M.rows, 1), DE)
return [], A
if a.is_ground:
# Normalization: a = 1.
a = a.LC()
b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]
if not b.is_zero and (DE.case == 'base' or
b.degree() > max(0, DE.d.degree() - 1)):
return prde_no_cancel_b_large(b, q, n, DE)
elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
and (DE.case == 'base' or DE.d.degree() >= 2)):
return prde_no_cancel_b_small(b, q, n, DE)
elif (DE.d.degree() >= 2 and
b.degree() == DE.d.degree() - 1 and
n > -b.as_poly().LC()/DE.d.as_poly().LC()):
raise NotImplementedError("prde_no_cancel_b_equal() is "
"not yet implemented.")
else:
# Liouvillian cases
if DE.case == 'primitive' or DE.case == 'exp':
return prde_cancel_liouvillian(b, q, n, DE)
else:
raise NotImplementedError("non-linear and hypertangent "
"cases have not yet been implemented")
# else: deg(a) > 0
# Iterate SPDE as long as possible cumulating coefficient
# and terms for the recovery of original solutions.
alpha, beta = 1, [0]*m
while n >= 0: # and a, b relatively prime
a, b, q, r, n = prde_spde(a, b, q, n, DE)
beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
alpha *= a
# Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
# solutions alpha*p + Sum(ci*betai) of the initial equation.
d = a.gcd(b)
if not d.is_ground:
break
# a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
# only if the sum is divisible by d.
qq, M = poly_linear_constraints(q, d)
# qq = [qq1, ..., qqm] where qqi = qi.quo(d).
    # M is a matrix with m columns and entries in k.
# Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
# divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the quotient is Sum(fi*qqi).
A, _ = constant_system(M, zeros(M.rows, 1), DE)
# A is a matrix with m columns and entries in Const(k).
# Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
# for c1, ..., cm in Const(k) if and only if
# A*Matrix([c1, ...,cm]) == 0.
V = A.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
# Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case, solutions of
# a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi))
# are the same as those of
# (a/d)*Dp + (b/d)*p = Sum(dj*rj)
# where rj = Sum(aji*qqi).
if not V: # No non-trivial solution.
return [], eye(m) # Could return A, but this has
# the minimum number of rows.
Mqq = Matrix([qq]) # A single row.
r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
# solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
# equation. These are equal to alpha*p + Sum(dj*fj) where
# fj = Sum(aji*betai).
Mbeta = Matrix([beta])
f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu]
#
# Solve the reduced equation recursively.
#
g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)
    # g = [g1, ..., gv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation are then
# Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).
# Collect solution components.
h = f + [alpha*gk for gk in g]
# Build combined relation matrix.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(g)))
A = A.col_join(zeros(B.rows, m).row_join(B))
return h, A
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
    # Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
# We try n=5. At least for prde_spde, it will always
# terminate no matter what n is.
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
# A temporary bound is set. Eventually, it will be removed.
# the currently added test case takes large time
# even with n=5, and much longer with large n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
    # h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
    # is solvable in k(t). The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C
def limited_integrate_reduce(fa, fd, G, DE):
"""
Simpler version of step 1 & 2 for the limited integration problem.
Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
(a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
over k, then deg(p) <= N.
So that the special part is always computed, this function calls the more
general prde_special_denom() automatically if it cannot determine that
S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
t is linear and non-Liouvillian, which for the transcendental case, implies
    that Dt == a*t + b for some a, b in k*.
"""
dn, ds = splitfactor(fd, DE)
E = [splitfactor(gd, DE) for _, gd in G]
En, Es = list(zip(*E))
c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)
hn = c.gcd(c.diff(DE.t))
a = hn
b = -derivation(hn, DE)
N = 0
# These are the cases where we know that S1irr = Sirr, but there could be
# others, and this algorithm will need to be extended to handle them.
if DE.case in ['base', 'primitive', 'exp', 'tan']:
hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm)
a = hn*hs
b -= (hn*derivation(hs, DE)).quo(hs)
mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
ga, gd in G]))
# So far, all the above are also nonlinear or Liouvillian, but if this
# changes, then this will need to be updated to call bound_degree()
# as per the docstring of this function (DE.case == 'other_linear').
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
else:
# TODO: implement this
raise NotImplementedError
V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)
def limited_integrate(fa, fd, G, DE):
"""
Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))
"""
fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
    # interpreting the limited integration problem as a
# parametric Risch DE problem
Fa = Poly(0, DE.t)
Fd = Poly(1, DE.t)
G = [(fa, fd)] + G
h, A = param_rischDE(Fa, Fd, G, DE)
V = A.nullspace()
V = [v for v in V if v[0] != 0]
if not V:
return None
else:
# we can take any vector from V, we take V[0]
c0 = V[0][0]
# v = [-1, c1, ..., cm, d1, ..., dr]
v = V[0]/(-c0)
r = len(h)
m = len(v) - r - 1
C = list(v[1: m + 1])
y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \
for i in range(r)])
y_num, y_den = y.as_numer_denom()
Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
return Y, C
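# Usage sketch (added; the DifferentialExtension construction and the exact
# normalization of the returned Y are assumptions, not asserted results):
# over QQ(x) with Dx == 1, taking f == (x + 1)/x == 1 + 1/x and w1 == 1/x,
# the pair v == x, c1 == 1 satisfies f == Dv + c1*w1.  A call of roughly
# this shape exercises that case:
#
#     >>> from sympy import Poly, symbols
#     >>> from sympy.integrals.risch import DifferentialExtension
#     >>> x = symbols('x')
#     >>> DE = DifferentialExtension(extension={'D': [Poly(1, x)]})
#     >>> limited_integrate(Poly(x + 1, x), Poly(x, x),
#     ...                   [(Poly(1, x), Poly(x, x))], DE)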
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
"""
Parametric logarithmic derivative heuristic.
Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
theta over k(t), raises either NotImplementedError, in which case the
heuristic failed, or returns None, in which case it has proven that no
solution exists, or returns a solution (n, m, v) of the equation
n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.
If this heuristic fails, the structure theorem approach will need to be
used.
The argument w == Dtheta/theta
"""
# TODO: finish writing this and write tests
c1 = c1 or Dummy('c1')
p, a = fa.div(fd)
q, b = wa.div(wd)
B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
C = max(p.degree(DE.t), q.degree(DE.t))
if q.degree(DE.t) > B:
eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) > B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N*fa*wd - M*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
'auto')
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
if p.degree(DE.t) > B:
return None
c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
ln, ls = splitfactor(l, DE)
z = ls*ln.gcd(ln.diff(DE.t))
if not z.has(DE.t):
# TODO: We treat this as 'no solution', until the structure
# theorem version of parametric_log_deriv is implemented.
return None
u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z)
u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)
eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) <= B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
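# Worked example (added comment): with theta == exp(x), so w == Dtheta/theta
# == 1, and f == 1 + 1/x, the triple (n, m, v) == (1, 1, x) solves
# n*f == Dv/v + m*w, since Dx/x + 1 == 1/x + 1.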
def parametric_log_deriv(fa, fd, wa, wd, DE):
# TODO: Write the full algorithm using the structure theorems.
# try:
A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
# except NotImplementedError:
# Heuristic failed, we have to use the full method.
# TODO: This could be implemented more efficiently.
# It isn't too worrisome, because the heuristic handles most difficult
# cases.
return A
def is_deriv_k(fa, fd, DE):
r"""
Checks if Df/f is the derivative of an element of k(t).
a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
which means that Df/f is not the derivative of an element of k(t). ans is
a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
for seeing exactly which elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df/f is the derivative of a element of K if and only if there are ri
in QQ such that::
        Sum(ri*Dti, i in L_K/C(x)) + Sum(ri*Dti/ti, i in E_K/C(x)) == Df/f
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). E_args are the arguments of the
hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
exp(E_args[i])). This is needed to compute the final answer u such that
Df/f == Du.
    log(f) will be the same as u up to an additive constant. This is because
they will both behave the same as monomials. For example, both log(x) and
log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
Therefore, the term const is returned. const is such that
log(const) + f == u. This is calculated by dividing the arguments of one
logarithm from the other. Therefore, it is necessary to pass the arguments
of the logarithmic terms in L_args.
To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical
"""
# Compute Df/f
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
dfa, dfd = dfa.cancel(dfd, include=True)
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
terms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Add(*[Mul(i, j) for i, j in ans])
argterms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
l = []
ld = []
for i, j in zip(argterms, u):
# We need to get around things like sqrt(x**2) != x
# and also sqrt(x**2 + 2*x + 1) != x + 1
# Issue 10798: i need not be a polynomial
i, d = i.as_numer_denom()
icoeff, iterms = sqf_list(i)
l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
dcoeff, dterms = sqf_list(d)
ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
return (ans, result, const)
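# Worked example (added comment): with t == exp(x**2), so Dt/t == 2*x, the
# element f == t has Df/f == 2*x == D(x**2), so Df/f is the derivative of
# u == x**2 and is_deriv_k would report this through ans == [(x**2, 1)],
# i.e. u == x**2 (up to the additive constant discussed above).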
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
r"""
Checks if Df is the logarithmic derivative of a k(t)-radical.
b in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
Either returns (ans, u, n, const) or None, which means that Df cannot be
written as the logarithmic derivative of a k(t)-radical. ans is a list of
tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
seeing exactly what elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df is the logarithmic derivative of a K-radical if and only if there
are ri in QQ such that::
        Sum(ri*Dti, i in L_K/C(x)) + Sum(ri*Dti/ti, i in E_K/C(x)) == Df
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). L_args are the arguments of the logarithms
indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
needed to compute the final answer u such that n*f == Du/u.
exp(f) will be the same as u up to a multiplicative constant. This is
because they will both behave the same as monomials. For example, both
exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
is returned. const is such that exp(const)*f == u. This is calculated by
subtracting the arguments of one exponential from the other. Therefore, it
is necessary to pass the arguments of the exponential terms in E_args.
To handle the case where we are given Df, not f, use
is_log_deriv_k_t_radical_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_deriv_k
"""
H = []
if Df:
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
include=True)
else:
dfa, dfd = fa, fd
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
# TODO: But maybe we can tell if they're not rational, like
# log(2)/log(3). Also, there should be an option to continue
# anyway, even if the result might potentially be wrong.
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
u *= n
terms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Mul(*[Pow(i, j) for i, j in ans])
# exp(f) will be the same as result up to a multiplicative
# constant. We now find the log of that constant.
argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
const = cancel(fa.as_expr()/fd.as_expr() -
Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
return (ans, result, n, const)
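# Worked example (added comment): with t == log(x), so Dt == 1/x, the element
# f == t has Df == 1/x == Dx/x, i.e. Df is the logarithmic derivative of the
# K-radical u == x with n == 1.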
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
    for any given fa, fd, DE in that it finds the solution in the
    given field, not in some (possibly unspecified) extension, and
    the "in_field" suffix in the function name is used to indicate that.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
See also
========
is_log_deriv_k_t_radical, is_deriv_k
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
pass
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
        # derivative problem when integrating elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return None
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return None
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return None
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return None
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return None
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return None
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_sqf or fa.degree() >= fd.degree():
# f is the logarithmic derivative in the base case if and only if
# f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
# gcd(fa, fd) == 1. The last condition is handled by cancel() above.
return None
# Note: if residueterms = [], returns (1, 1)
# f had better be 0 in that case.
n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S(1))
u = Mul(*[Pow(i, j*n) for i, j in residueterms])
return (n, u)
elif case == 'tan':
raise NotImplementedError("The hypertangent case is "
"not yet implemented for is_log_deriv_k_t_radical_in_field()")
elif case in ['other_linear', 'other_nonlinear']:
# XXX: If these are supported by the structure theorems, change to NotImplementedError.
raise ValueError("The %s case is not supported in this function." % case)
else:
raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
"'base', 'auto'}, not %s" % case)
common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in
residueterms]] + [n], S(1))
residueterms = [(i, j*common_denom) for i, j in residueterms]
m = common_denom//n
if common_denom != n*m: # Verify exact division
raise ValueError("Inexact division")
u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
return (common_denom, u)
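# Worked example (added comment; the exact return form is not asserted): in
# the base case over QQ(x), f == 2/(3*x) satisfies n*f == Du/u with n == 3
# and u == x**2, since 3*(2/(3*x)) == 2/x == D(x**2)/x**2, which is the kind
# of (n, u) pair this function returns.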
| [((1901, 1920), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (1912, 1920), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((2022, 2041), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['gd', 'DE'], {}), '(gd, DE)\n', (2033, 2041), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((7665, 7684), 'sympy.polys.Poly', 'Poly', (['d'], {'field': '(True)'}), '(d, field=True)\n', (7669, 7684), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((11160, 11173), 'sympy.core.compatibility.range', 'range', (['A.cols'], {}), '(A.cols)\n', (11165, 11173), False, 'from sympy.core.compatibility import reduce, range\n'), ((13582, 13598), 'sympy.core.compatibility.range', 'range', (['n', '(-1)', '(-1)'], {}), '(n, -1, -1)\n', (13587, 13598), False, 'from sympy.core.compatibility import reduce, range\n'), ((14063, 14069), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (14066, 14069), False, 'from sympy.matrices import zeros, eye\n'), ((14800, 14815), 'sympy.core.compatibility.range', 'range', (['n', '(0)', '(-1)'], {}), '(n, 0, -1)\n', (14805, 14815), False, 'from sympy.core.compatibility import reduce, range\n'), ((17669, 17675), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (17672, 17675), False, 'from sympy.matrices import zeros, eye\n'), ((18179, 18195), 'sympy.core.compatibility.range', 'range', (['n', '(-1)', '(-1)'], {}), '(n, -1, -1)\n', (18184, 18195), False, 'from sympy.core.compatibility import reduce, range\n'), ((23184, 23196), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[qq]'], {}), '([qq])\n', (23190, 23196), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((23496, 23510), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[beta]'], {}), '([beta])\n', (23502, 23510), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((25045, 25072), 'sympy.integrals.rde.weak_normalizer', 'weak_normalizer', (['fa', 'fd', 'DE'], {}), '(fa, fd, DE)\n', (25060, 25072), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((27136, 27147), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[q]'], {}), '([q])\n', (27142, 27147), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((29013, 29053), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[(wl[:m] + wl[-v:]) for wl in W]'], {}), '([(wl[:m] + wl[-v:]) for wl in W])\n', (29019, 29053), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((29262, 29289), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[ni[:] for ni in N]'], {}), '([ni[:] for ni in N])\n', (29268, 29289), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((30375, 30394), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (30386, 30394), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((31873, 31886), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (31877, 31886), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((31896, 31909), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (31900, 31909), False, 'from 
sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((34238, 34256), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['l', 'DE'], {}), '(l, DE)\n', (34249, 34256), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((34649, 34663), 'sympy.solvers.solve', 'solve', (['eqs', 'c1'], {}), '(eqs, c1)\n', (34654, 34663), False, 'from sympy.solvers import solve\n'), ((39354, 39379), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[E_part + L_part]'], {}), '([E_part + L_part])\n', (39360, 39379), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((44783, 44808), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[E_part + L_part]'], {}), '([E_part + L_part])\n', (44789, 44808), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((47372, 47391), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (47383, 47391), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((47462, 47493), 'sympy.integrals.risch.residue_reduce', 'residue_reduce', (['fa', 'fd', 'DE'], {'z': 'z'}), '(fa, fd, DE, z=z)\n', (47476, 47493), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((1994, 2007), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (1998, 2007), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((4682, 4698), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (4686, 4698), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((4997, 5018), 'sympy.integrals.rde.order_at', 'order_at', (['ba', 'p', 'DE.t'], {}), '(ba, p, DE.t)\n', (5005, 5018), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5021, 5042), 'sympy.integrals.rde.order_at', 'order_at', (['bd', 'p', 'DE.t'], {}), '(bd, p, DE.t)\n', (5029, 5042), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((7911, 7927), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (7917, 7927), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((8606, 8622), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (8612, 8622), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((11192, 11205), 'sympy.core.compatibility.range', 'range', (['A.rows'], {}), '(A.rows)\n', (11197, 11205), False, 'from sympy.core.compatibility import reduce, range\n'), ((12753, 12770), 'sympy.integrals.risch.derivation', 'derivation', (['a', 'DE'], {}), '(a, DE)\n', (12763, 12770), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13632, 13640), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (13637, 13640), False, 'from sympy.core.compatibility import reduce, range\n'), ((13877, 13888), 'sympy.matrices.zeros', 'zeros', (['(0)', '(2)'], {}), '(0, 2)\n', (13882, 13888), False, 'from sympy.matrices import zeros, eye\n'), ((14033, 14049), 'sympy.matrices.zeros', 'zeros', (['(dc + 1)', '(1)'], {}), '(dc + 1, 1)\n', (14038, 
14049), False, 'from sympy.matrices import zeros, eye\n'), ((14849, 14857), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (14854, 14857), False, 'from sympy.core.compatibility import reduce, range\n'), ((15100, 15108), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (15105, 15108), False, 'from sympy.core.compatibility import reduce, range\n'), ((15531, 15537), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (15534, 15537), False, 'from sympy.matrices import zeros, eye\n'), ((17326, 17342), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (17332, 17342), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((17695, 17715), 'sympy.matrices.zeros', 'zeros', (['A.rows', '(r + m)'], {}), '(A.rows, r + m)\n', (17700, 17715), False, 'from sympy.matrices import zeros, eye\n'), ((17736, 17752), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (17741, 17752), False, 'from sympy.matrices import zeros, eye\n'), ((18900, 18909), 'sympy.core.compatibility.range', 'range', (['ri'], {}), '(ri)\n', (18905, 18909), False, 'from sympy.core.compatibility import reduce, range\n'), ((22271, 22287), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (22276, 22287), False, 'from sympy.matrices import zeros, eye\n'), ((24232, 24238), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (24235, 24238), False, 'from sympy.matrices import zeros, eye\n'), ((26321, 26337), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (26326, 26337), False, 'from sympy.matrices import zeros, eye\n'), ((27472, 27514), 'sympy.integrals.rde.bound_degree', 'bound_degree', (['a', 'b', 'r', 'DE'], {'parametric': '(True)'}), '(a, b, r, DE, parametric=True)\n', (27484, 27514), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((28302, 28308), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (28305, 28308), False, 'from sympy.matrices import zeros, eye\n'), ((30404, 30423), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['gd', 'DE'], {}), '(gd, DE)\n', (30415, 30423), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((30592, 30610), 'sympy.integrals.risch.derivation', 'derivation', (['hn', 'DE'], {}), '(hn, DE)\n', (30602, 30610), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((33250, 33261), 'sympy.core.Dummy', 'Dummy', (['"""c1"""'], {}), "('c1')\n", (33255, 33261), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((33514, 33528), 'sympy.solvers.solve', 'solve', (['eqs', 'c1'], {}), '(eqs, c1)\n', (33519, 33528), False, 'from sympy.solvers import solve\n'), ((34211, 34224), 'sympy.polys.Poly', 'Poly', (['c', 'DE.t'], {}), '(c, DE.t)\n', (34215, 34224), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((47440, 47450), 'sympy.core.Dummy', 'Dummy', (['"""z"""'], {}), "('z')\n", (47445, 47450), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((50870, 50874), 'sympy.core.S', 'S', (['(1)'], {}), '(1)\n', (50871, 50874), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((4735, 4760), 'sympy.polys.Poly', 'Poly', (['(DE.t ** 2 + 1)', 'DE.t'], {}), '(DE.t ** 2 + 1, DE.t)\n', (4739, 4760), False, 
'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((12786, 12804), 'sympy.integrals.risch.derivation', 'derivation', (['ri', 'DE'], {}), '(ri, DE)\n', (12796, 12804), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13551, 13564), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (13555, 13564), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((13702, 13728), 'sympy.polys.Poly', 'Poly', (['(si * DE.t ** N)', 'DE.t'], {}), '(si * DE.t ** N, DE.t)\n', (13706, 13728), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((14769, 14782), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (14773, 14782), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((14945, 14971), 'sympy.polys.Poly', 'Poly', (['(si * DE.t ** N)', 'DE.t'], {}), '(si * DE.t ** N, DE.t)\n', (14949, 14971), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((15328, 15336), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', ([], {}), '()\n', (15334, 15336), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((15497, 15513), 'sympy.matrices.zeros', 'zeros', (['(dc + 1)', '(1)'], {}), '(dc + 1, 1)\n', (15502, 15513), False, 'from sympy.matrices import zeros, eye\n'), ((15721, 15739), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (15735, 15739), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15798, 15824), 'sympy.integrals.risch.frac_in', 'frac_in', (['b', 't0'], {'field': '(True)'}), '(b, t0, field=True)\n', (15805, 15824), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((16828, 16850), 'sympy.polys.Poly', 'Poly', (['(1)', 't'], {'field': '(True)'}), '(1, t, field=True)\n', (16832, 16850), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((17251, 17262), 'sympy.matrices.zeros', 'zeros', (['d', '(1)'], {}), '(d, 1)\n', (17256, 17262), False, 'from sympy.matrices import zeros, eye\n'), ((18095, 18113), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18109, 18113), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((18136, 18164), 'sympy.integrals.risch.frac_in', 'frac_in', (['b', 'DE.t'], {'field': '(True)'}), '(b, DE.t, field=True)\n', (18143, 18164), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((18426, 18444), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18440, 18444), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((20161, 20177), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (20166, 20177), False, 'from sympy.matrices import zeros, eye\n'), ((23077, 23083), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (23080, 23083), False, 'from sympy.matrices import 
zeros, eye\n'), ((27119, 27125), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (27122, 27125), False, 'from sympy.matrices import zeros, eye\n'), ((30990, 31015), 'sympy.integrals.rde.order_at_oo', 'order_at_oo', (['fa', 'fd', 'DE.t'], {}), '(fa, fd, DE.t)\n', (31001, 31015), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((32458, 32475), 'sympy.polys.Poly', 'Poly', (['y_num', 'DE.t'], {}), '(y_num, DE.t)\n', (32462, 32475), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((32477, 32494), 'sympy.polys.Poly', 'Poly', (['y_den', 'DE.t'], {}), '(y_den, DE.t)\n', (32481, 32494), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((48659, 48694), 'sympy.integrals.risch.residue_reduce_derivation', 'residue_reduce_derivation', (['H', 'DE', 'z'], {}), '(H, DE, z)\n', (48684, 48694), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49033, 49049), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (49037, 49049), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((49078, 49096), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (49092, 49096), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49119, 49148), 'sympy.integrals.risch.frac_in', 'frac_in', (['p', 'DE.t'], {'cancel': '(True)'}), '(p, DE.t, cancel=True)\n', (49126, 49148), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49170, 49193), 'sympy.integrals.risch.frac_in', 'frac_in', (['(wa, wd)', 'DE.t'], {}), '((wa, wd), DE.t)\n', (49177, 49193), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((2163, 2180), 'sympy.integrals.risch.derivation', 'derivation', (['h', 'DE'], {}), '(h, DE)\n', (2173, 2180), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((5057, 5078), 'sympy.integrals.rde.order_at', 'order_at', (['Ga', 'p', 'DE.t'], {}), '(Ga, p, DE.t)\n', (5065, 5078), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5081, 5102), 'sympy.integrals.rde.order_at', 'order_at', (['Gd', 'p', 'DE.t'], {}), '(Gd, p, DE.t)\n', (5089, 5102), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5257, 5273), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (5261, 5273), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5292, 5310), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (5306, 5310), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((5529, 5550), 'sympy.integrals.risch.frac_in', 'frac_in', (['dcoeff', 'DE.t'], {}), '(dcoeff, DE.t)\n', (5536, 5550), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, 
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11703, 11716), 'sympy.core.compatibility.range', 'range', (['A.rows'], {}), '(A.rows)\n', (11708, 11716), False, 'from sympy.core.compatibility import reduce, range\n'), ((14089, 14105), 'sympy.matrices.zeros', 'zeros', (['A.rows', 'm'], {}), '(A.rows, m)\n', (14094, 14105), False, 'from sympy.matrices import zeros, eye\n'), ((17773, 17784), 'sympy.matrices.zeros', 'zeros', (['m', 'r'], {}), '(m, r)\n', (17778, 17784), False, 'from sympy.matrices import zeros, eye\n'), ((18277, 18295), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18291, 18295), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((19993, 20004), 'sympy.matrices.zeros', 'zeros', (['(1)', 'm'], {}), '(1, m)\n', (19998, 20004), False, 'from sympy.matrices import zeros, eye\n'), ((24339, 24355), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (24344, 24355), False, 'from sympy.matrices import zeros, eye\n'), ((28409, 28425), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (28414, 28425), False, 'from sympy.matrices import zeros, eye\n'), ((33481, 33500), 'sympy.core.compatibility.range', 'range', (['(B + 1)', '(C + 1)'], {}), '(B + 1, C + 1)\n', (33486, 33500), False, 'from sympy.core.compatibility import reduce, range\n'), ((38493, 38511), 'sympy.integrals.risch.derivation', 'derivation', (['fa', 'DE'], {}), '(fa, DE)\n', (38503, 38511), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((38517, 38535), 'sympy.integrals.risch.derivation', 'derivation', (['fd', 'DE'], {}), '(fd, DE)\n', (38527, 38535), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((40595, 40606), 'sympy.polys.sqf_list', 'sqf_list', (['i'], {}), '(i)\n', (40603, 40606), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((40726, 40737), 'sympy.polys.sqf_list', 'sqf_list', (['d'], {}), '(d)\n', (40734, 40737), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((49005, 49025), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (49015, 49025), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49382, 49400), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (49396, 49400), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49423, 49439), 'sympy.integrals.risch.frac_in', 'frac_in', (['p', 'DE.t'], {}), '(p, DE.t)\n', (49430, 49439), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((4847, 4860), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (4851, 4860), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5818, 5843), 'sympy.polys.Poly', 'Poly', 
(['(DE.t ** 2 + 1)', 'DE.t'], {}), '(DE.t ** 2 + 1, DE.t)\n', (5822, 5843), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5860, 5878), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (5874, 5878), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6114, 6135), 'sympy.integrals.risch.frac_in', 'frac_in', (['dcoeff', 'DE.t'], {}), '(dcoeff, DE.t)\n', (6121, 6135), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6155, 6201), 'sympy.integrals.risch.recognize_log_derivative', 'recognize_log_derivative', (['(2 * betaa)', 'betad', 'DE'], {}), '(2 * betaa, betad, DE)\n', (6179, 6201), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6668, 6681), 'sympy.polys.Poly', 'Poly', (['n', 'DE.t'], {}), '(n, DE.t)\n', (6672, 6681), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((12068, 12081), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[um1]'], {}), '([um1])\n', (12074, 12081), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((12687, 12714), 'sympy.integrals.risch.gcdex_diophantine', 'gcdex_diophantine', (['b', 'a', 'qi'], {}), '(b, a, qi)\n', (12704, 12714), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13782, 13802), 'sympy.integrals.risch.derivation', 'derivation', (['sitn', 'DE'], {}), '(sitn, DE)\n', (13792, 13802), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15025, 15045), 'sympy.integrals.risch.derivation', 'derivation', (['sitn', 'DE'], {}), '(sitn, DE)\n', (15035, 15045), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15226, 15244), 'sympy.integrals.risch.derivation', 'derivation', (['si', 'DE'], {}), '(si, DE)\n', (15236, 15244), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15561, 15577), 'sympy.matrices.zeros', 'zeros', (['A.rows', 'm'], {}), '(A.rows, m)\n', (15566, 15577), False, 'from sympy.matrices import zeros, eye\n'), ((18775, 18792), 'sympy.matrices.zeros', 'zeros', (['M.rows', 'ri'], {}), '(M.rows, ri)\n', (18780, 18792), False, 'from sympy.matrices import zeros, eye\n'), ((19048, 19067), 'sympy.integrals.risch.derivation', 'derivation', (['hji', 'DE'], {}), '(hji, DE)\n', (19058, 19067), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((30945, 30963), 'sympy.integrals.risch.derivation', 'derivation', (['hs', 'DE'], {}), '(hs, DE)\n', (30955, 30963), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, 
recognize_log_derivative\n'), ((31022, 31047), 'sympy.integrals.rde.order_at_oo', 'order_at_oo', (['ga', 'gd', 'DE.t'], {}), '(ga, gd, DE.t)\n', (31033, 31047), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((33323, 33343), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (33333, 33343), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((39221, 39243), 'sympy.polys.Poly', 'Poly', (['DE.T[i]', 'DE.T[i]'], {}), '(DE.T[i], DE.T[i])\n', (39225, 39243), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((40886, 40894), 'sympy.core.Mul', 'Mul', (['*ld'], {}), '(*ld)\n', (40889, 40894), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((44650, 44672), 'sympy.polys.Poly', 'Poly', (['DE.T[i]', 'DE.T[i]'], {}), '(DE.T[i], DE.T[i])\n', (44654, 44672), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((50155, 50159), 'sympy.core.S', 'S', (['(1)'], {}), '(1)\n', (50156, 50159), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((6684, 6701), 'sympy.integrals.risch.derivation', 'derivation', (['p', 'DE'], {}), '(p, DE)\n', (6694, 6701), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11586, 11618), 'sympy.integrals.risch.derivation', 'derivation', (['u[i]', 'DE'], {'basic': '(True)'}), '(u[i], DE, basic=True)\n', (11596, 11618), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11640, 11675), 'sympy.integrals.risch.derivation', 'derivation', (['A[i, j]', 'DE'], {'basic': '(True)'}), '(A[i, j], DE, basic=True)\n', (11650, 11675), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((16906, 16910), 'sympy.core.S', 'S', (['(0)'], {}), '(0)\n', (16907, 16910), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((32388, 32396), 'sympy.core.compatibility.range', 'range', (['r'], {}), '(r)\n', (32393, 32396), False, 'from sympy.core.compatibility import reduce, range\n'), ((39486, 39515), 'sympy.integrals.risch.derivation', 'derivation', (['i', 'DE'], {'basic': '(True)'}), '(i, DE, basic=True)\n', (39496, 39515), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((40101, 40110), 'sympy.core.Mul', 'Mul', (['i', 'j'], {}), '(i, j)\n', (40104, 40110), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40878, 40885), 'sympy.core.Mul', 'Mul', (['*l'], {}), '(*l)\n', (40881, 40885), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((43898, 43916), 'sympy.integrals.risch.derivation', 'derivation', (['fa', 'DE'], {}), '(fa, DE)\n', (43908, 43916), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((43922, 43940), 'sympy.integrals.risch.derivation', 'derivation', (['fd', 'DE'], {}), '(fd, DE)\n', (43932, 43940), False, 'from 
sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((44914, 44943), 'sympy.integrals.risch.derivation', 'derivation', (['i', 'DE'], {'basic': '(True)'}), '(i, DE, basic=True)\n', (44924, 44943), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((45827, 45836), 'sympy.core.Pow', 'Pow', (['i', 'j'], {}), '(i, j)\n', (45830, 45836), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((51090, 51099), 'sympy.core.Pow', 'Pow', (['i', 'j'], {}), '(i, j)\n', (51093, 51099), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((11425, 11454), 'sympy.integrals.risch.derivation', 'derivation', (['x', 'DE'], {'basic': '(True)'}), '(x, DE, basic=True)\n', (11435, 11454), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11476, 11511), 'sympy.integrals.risch.derivation', 'derivation', (['A[i, j]', 'DE'], {'basic': '(True)'}), '(A[i, j], DE, basic=True)\n', (11486, 11511), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11858, 11883), 'sympy.polys.cancel', 'cancel', (['(r - Asj * Rm1[jj])'], {}), '(r - Asj * Rm1[jj])\n', (11864, 11883), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((11979, 12000), 'sympy.polys.cancel', 'cancel', (['(r - Asj * um1)'], {}), '(r - Asj * um1)\n', (11985, 12000), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((50179, 50192), 'sympy.core.Pow', 'Pow', (['i', '(j * n)'], {}), '(i, j * n)\n', (50182, 50192), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((18336, 18356), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (18346, 18356), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((46194, 46207), 'sympy.core.Mul', 'Mul', (['i', '(j / n)'], {}), '(i, j / n)\n', (46197, 46207), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40639, 40653), 'sympy.core.Pow', 'Pow', (['icoeff', 'j'], {}), '(icoeff, j)\n', (40642, 40653), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40658, 40671), 'sympy.core.Pow', 'Pow', (['b', '(e * j)'], {}), '(b, e * j)\n', (40661, 40671), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40771, 40785), 'sympy.core.Pow', 'Pow', (['dcoeff', 'j'], {}), '(dcoeff, j)\n', (40774, 40785), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40790, 40803), 'sympy.core.Pow', 'Pow', (['b', '(e * j)'], {}), '(b, e * j)\n', (40793, 40803), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n')] |
levs72/pyneng-examples | ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | from pprint import pprint
import yaml
import netmiko
import paramiko
def send_cmd_with_prompt(device, command, *, wait_for, confirmation):
if type(wait_for) == str:
wait_for = [wait_for]
if type(confirmation) == str:
confirmation = [confirmation]
with netmiko.Netmiko(**device) as ssh:
ssh.enable()
result = ssh.send_command_timing(
command, strip_prompt=False, strip_command=False
)
for wait, confirm in zip(wait_for, confirmation):
if wait in result:
result += ssh.send_command_timing(
confirm, strip_prompt=False, strip_command=False
)
return result
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = send_cmd_with_prompt(
r1, "copy run start", wait_for="Destination filename", confirmation="\n"
)
print(out)
"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
| [((282, 307), 'netmiko.Netmiko', 'netmiko.Netmiko', ([], {}), '(**device)\n', (297, 307), False, 'import netmiko\n'), ((782, 799), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (796, 799), False, 'import yaml\n')] |
marcodalessandro76/MPPI | mppi/Utilities/AttributeDict.py | ad60b73270b1f376ac501d47285146f1c3af457a | class AttributeDict(object):
"""
A class to convert a nested Dictionary into an object with key-values
    accessible using attribute notation (AttributeDict.attribute) instead of
key notation (Dict["key"]). This class recursively sets Dicts to objects,
allowing you to recurse down nested dicts (like: AttributeDict.attr.attr)
"""
def __init__(self, **entries):
self.add_entries(**entries)
def add_entries(self, **entries):
for key, value in entries.items():
if type(value) is dict:
self.__dict__[key] = AttributeDict(**value)
else:
self.__dict__[key] = value
def getAttributes(self):
"""
Return all the attributes of the object
"""
return self.__dict__.keys()
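# Illustrative usage sketch (added for clarity, not part of the original
# module); the sample keys below are made up.
if __name__ == "__main__":
    config = AttributeDict(**{"run": {"name": "scf", "steps": 10}, "verbose": True})
    print(config.run.name)         # -> scf
    print(config.run.steps)        # -> 10
    print(config.getAttributes())  # -> dict_keys(['run', 'verbose'])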
| [] |
Growing-Beyond-Earth/GettingStarted | LightTestLoop.py | 04c2fd5fa36224ac25a6c6c62c4d6e558b27e700 | # GROWNG BEYOND EARTH CONTROL BOX Traning
# RASPBERRY PI PICO / MICROPYTHON
# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021
# The Growing Beyond Earth (GBE) control box is a device that controls
# the LED lights and fan in a GBE growth chamber. It can also control
# accessories including a 12v water pump and environmental sensors.
# The device is based on a Raspberry Pi Pico microcontroller running
# Micropython.
# Lesson written by @MarioTheMaker
from sys import stdin, stdout, exit
import machine
import time
#Set the brightness for each color
red_brightness = 100
green_brightness = 100
blue_brightness = 100
white_brightness = 100
# Pulse width modulation (PWM) is a way to get an artificial analog output on a digital pin.
# It achieves this by rapidly toggling the pin from low to high. There are two parameters
# associated with this: the frequency of the toggling, and the duty cycle.
# The duty cycle is defined to be how long the pin is high compared with the length of a
# single period (low plus high time). Maximum duty cycle is when the pin is high all of the
# time, and minimum is when it is low all of the time.
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:
# control I/O pins
# machine.Pin(id, mode=- 1, pull=- 1, *, value, drive, alt)
# Access the pin peripheral (GPIO pin) associated with the given id.
# If additional arguments are given in the constructor then they are used to initialise
# the pin. Any settings that are not specified will remain in their previous state.
# More info https://docs.micropython.org/en/latest/library/machine.Pin.html
r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel
g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel
b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel
w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel
# More info https://docs.micropython.org/en/latest/library/machine.PWM.html
# Start a loop and change the brightness multiplier "n"
# PWM.duty_u16([value]) Get or set the current duty cycle of the PWM output,
# as an unsigned 16-bit value in the range 0 to 65535 inclusive.
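# Worked example (added for clarity): a 50% duty cycle is ~32768 out of 65535.
# In the loop below duty = brightness * n, so the first pass (100 * 100 = 10000)
# drives each channel at roughly 15% of full scale.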
n = 100
while n > 0:
print("Power Level ",n)
r.duty_u16(int(red_brightness)*n)
g.duty_u16(int(green_brightness)*n)
b.duty_u16(int(blue_brightness)*n)
w.duty_u16(int(white_brightness)*n)
time.sleep(.3)
n = n - 5
#Turn all the lights off
time.sleep(3)
r.duty_u16(0)
g.duty_u16(0)
b.duty_u16(0)
w.duty_u16(0)
| [((2409, 2422), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2419, 2422), False, 'import time\n'), ((1639, 1653), 'machine.Pin', 'machine.Pin', (['(0)'], {}), '(0)\n', (1650, 1653), False, 'import machine\n'), ((1700, 1714), 'machine.Pin', 'machine.Pin', (['(2)'], {}), '(2)\n', (1711, 1714), False, 'import machine\n'), ((1763, 1777), 'machine.Pin', 'machine.Pin', (['(1)'], {}), '(1)\n', (1774, 1777), False, 'import machine\n'), ((1825, 1839), 'machine.Pin', 'machine.Pin', (['(3)'], {}), '(3)\n', (1836, 1839), False, 'import machine\n'), ((2353, 2368), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2363, 2368), False, 'import time\n')] |
nicolasbock/hotsos | core/known_bugs_utils.py | 6a0d650a8d76b5a5f85f4ddc8c0a9f8939e1de7a | import os
import yaml
from core import plugintools
from core import constants
from core.searchtools import SearchDef
from core.issues.issue_utils import IssueEntry
LAUNCHPAD = "launchpad"
MASTER_YAML_KNOWN_BUGS_KEY = "bugs-detected"
KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []}
class BugSearchDef(SearchDef):
def __init__(self, pattern, bug_id, hint, reason,
reason_format_result_groups=None):
"""
@param reason: string reason describing the issue and why it has been
flagged. This string can be a template i.e. containing {} fields that
can be rendered using results.
@param reason_format_result_groups: if the reason string is a template,
this is a list of indexes in the results that can be extracted for
inclusion in the reason.
"""
super().__init__(pattern, tag=bug_id, hint=hint)
self._reason = reason
if reason is None:
self._reason = ""
self.reason_format_result_groups = reason_format_result_groups
@property
def reason(self):
return self._reason
def rendered_reason(self, search_result):
if self._reason and self.reason_format_result_groups:
values = []
for idx in self.reason_format_result_groups:
values.append(search_result.get(idx))
return self._reason.format(*values)
return self._reason
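# Illustrative sketch (not from the original module): a templated reason whose
# "{}" field is filled from result group 1. The pattern and bug id are made up.
#
#   sdef = BugSearchDef(r"unexpected value (\S+)", bug_id="1234567",
#                       hint="unexpected", reason="hit bug with value {}",
#                       reason_format_result_groups=[1])
#   sdef.rendered_reason(result)  # -> "hit bug with value <group 1 of result>"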
def _get_known_bugs():
"""
Fetch the current plugin known_bugs.yaml if it exists and return its
contents or None if it doesn't exist yet.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
if not os.path.exists(known_bugs_yaml):
return {}
bugs = yaml.safe_load(open(known_bugs_yaml))
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
return bugs
return {}
def add_known_bug(bug_id, description=None, type=LAUNCHPAD):
"""
Fetch the current plugin known_bugs.yaml if it exists and add new bug with
description of the bug.
"""
if not os.path.isdir(constants.PLUGIN_TMP_DIR):
raise Exception("plugin tmp dir '{}' not found".
format(constants.PLUGIN_TMP_DIR))
if type == LAUNCHPAD:
new_bug = "https://bugs.launchpad.net/bugs/{}".format(bug_id)
if description is None:
description = "no description provided"
entry = IssueEntry(new_bug, description, key="id")
current = _get_known_bugs()
if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY):
current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data)
else:
current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]}
known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml")
with open(known_bugs_yaml, 'w') as fd:
fd.write(yaml.dump(current))
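# Example call (illustrative; the bug id is a placeholder):
#   add_known_bug("1234567", description="summary of the issue")
# records https://bugs.launchpad.net/bugs/1234567 in the plugin's known_bugs.yaml.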
def add_known_bugs_to_master_plugin():
"""
Fetch the current plugin known_bugs.yaml and add it to the master yaml.
Note that this can only be called once per plugin and is typically
performed as a final part after all others have executed.
"""
bugs = _get_known_bugs()
if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY):
plugintools.save_part(bugs, priority=99)
| [((1781, 1838), 'os.path.join', 'os.path.join', (['constants.PLUGIN_TMP_DIR', '"""known_bugs.yaml"""'], {}), "(constants.PLUGIN_TMP_DIR, 'known_bugs.yaml')\n", (1793, 1838), False, 'import os\n'), ((2581, 2623), 'core.issues.issue_utils.IssueEntry', 'IssueEntry', (['new_bug', 'description'], {'key': '"""id"""'}), "(new_bug, description, key='id')\n", (2591, 2623), False, 'from core.issues.issue_utils import IssueEntry\n'), ((2873, 2930), 'os.path.join', 'os.path.join', (['constants.PLUGIN_TMP_DIR', '"""known_bugs.yaml"""'], {}), "(constants.PLUGIN_TMP_DIR, 'known_bugs.yaml')\n", (2885, 2930), False, 'import os\n'), ((1601, 1640), 'os.path.isdir', 'os.path.isdir', (['constants.PLUGIN_TMP_DIR'], {}), '(constants.PLUGIN_TMP_DIR)\n', (1614, 1640), False, 'import os\n'), ((1850, 1881), 'os.path.exists', 'os.path.exists', (['known_bugs_yaml'], {}), '(known_bugs_yaml)\n', (1864, 1881), False, 'import os\n'), ((2237, 2276), 'os.path.isdir', 'os.path.isdir', (['constants.PLUGIN_TMP_DIR'], {}), '(constants.PLUGIN_TMP_DIR)\n', (2250, 2276), False, 'import os\n'), ((3368, 3408), 'core.plugintools.save_part', 'plugintools.save_part', (['bugs'], {'priority': '(99)'}), '(bugs, priority=99)\n', (3389, 3408), False, 'from core import plugintools\n'), ((2991, 3009), 'yaml.dump', 'yaml.dump', (['current'], {}), '(current)\n', (3000, 3009), False, 'import yaml\n')] |
keobox/yap101 | examples/xml-rpc/echoserver.py | 26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9 | import SimpleXMLRPCServer as xmls
def echo(msg):
print 'Got', msg
return msg
class echoserver(xmls.SimpleXMLRPCServer):
allow_reuse_address = True
server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
server.serve_forever()
except:
server.server_close()
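# Quick client-side check (illustrative, not part of the original script); run
# from another Python 2 shell while the server is listening:
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8001')
#   print proxy.echo('hello')   # server prints "Got hello" and returns it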
| [] |
gpspelle/pose-estimation | tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py | b817dcc120092002984d8a41431046f323bc02c8 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
# All but first 2 and last one have two convolutions, and there is one
# extra conv that is not in the spec. (logits)
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators, the remaining 3 have one
# and there is one unaccounted.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
| [((1190, 1212), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1210, 1212), True, 'import tensorflow as tf\n'), ((7068, 7082), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (7080, 7082), True, 'import tensorflow as tf\n'), ((1348, 1372), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1370, 1372), True, 'import tensorflow as tf\n'), ((2035, 2069), 'copy.deepcopy', 'copy.deepcopy', (['mobilenet_v2.V2_DEF'], {}), '(mobilenet_v2.V2_DEF)\n', (2048, 2069), False, 'import copy\n'), ((2692, 2726), 'copy.deepcopy', 'copy.deepcopy', (['mobilenet_v2.V2_DEF'], {}), '(mobilenet_v2.V2_DEF)\n', (2705, 2726), False, 'import copy\n'), ((3440, 3464), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3462, 3464), True, 'import tensorflow as tf\n'), ((3891, 3915), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3913, 3915), True, 'import tensorflow as tf\n'), ((4428, 4452), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4450, 4452), True, 'import tensorflow as tf\n'), ((4993, 5017), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5015, 5017), True, 'import tensorflow as tf\n'), ((5423, 5447), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5445, 5447), True, 'import tensorflow as tf\n'), ((5729, 5753), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5751, 5753), True, 'import tensorflow as tf\n'), ((6099, 6123), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (6121, 6123), True, 'import tensorflow as tf\n'), ((6461, 6503), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': 'None'}), '(is_training=None)\n', (6485, 6503), False, 'from nets.mobilenet import mobilenet\n'), ((6668, 6711), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': '(False)'}), '(is_training=False)\n', (6692, 6711), False, 'from nets.mobilenet import mobilenet\n'), ((6800, 6842), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': '(True)'}), '(is_training=True)\n', (6824, 6842), False, 'from nets.mobilenet import mobilenet\n'), ((6931, 6957), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {}), '()\n', (6955, 6957), False, 'from nets.mobilenet import mobilenet\n'), ((1478, 1524), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (1492, 1524), True, 'import tensorflow as tf\n'), ((2113, 2159), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (2127, 2159), True, 'import tensorflow as tf\n'), ((2402, 2426), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2424, 2426), True, 'import tensorflow as tf\n'), ((2854, 2900), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (2868, 2900), True, 'import tensorflow as tf\n'), ((3202, 3248), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (3216, 3248), True, 'import tensorflow as tf\n'), ((3501, 3547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (3515, 3547), 
True, 'import tensorflow as tf\n'), ((4595, 4640), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 2)'], {}), '(tf.float32, (10, 224, 224, 2))\n', (4609, 4640), True, 'import tensorflow as tf\n'), ((5495, 5541), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5509, 5541), True, 'import tensorflow as tf\n'), ((5801, 5847), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5815, 5847), True, 'import tensorflow as tf\n'), ((6171, 6217), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (6185, 6217), True, 'import tensorflow as tf\n'), ((2475, 2534), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, input_size, input_size, 3)'], {}), '(tf.float32, (10, input_size, input_size, 3))\n', (2489, 2534), True, 'import tensorflow as tf\n'), ((4131, 4176), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 2)'], {}), '(tf.float32, (10, 224, 224, 2))\n', (4145, 4176), True, 'import tensorflow as tf\n'), ((5204, 5250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5218, 5250), True, 'import tensorflow as tf\n')] |
darwin/firebase-gist | firebase-gist.py | 5aa4eb89e82fbf2971d7afca07471e1f51ff6e51 | from firebase import firebase
import os
import datetime
import json
import logging
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from github3 import login
firebase_url = os.environ['FIREBASE_DB']
firebase_secret = os.environ['FIREBASE_SECRET']
firebase_path = os.environ['FIREBASE_PATH']
firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM
gh_token = os.environ['GH_TOKEN']
gh_gist = os.environ['GH_GIST']
gh_fname = os.environ['GH_FNAME']
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def connect_firebase():
f = firebase.FirebaseApplication(firebase_url, None)
f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True)
return f
logger.info('==================================')
logger.info('Fetching firebase data')
f = connect_firebase()
data = f.get(firebase_path, None)
new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True)
logger.info('Reading existing gist')
gh = login(token=gh_token)
gist = gh.gist(gh_gist)
old_content = ""
for f in gist.iter_files():
if f.filename == gh_fname:
old_content = f.content
break
if old_content == new_content:
logger.info('No changes detected')
else:
logger.info('Updating gist with new content')
gist.edit(files={
gh_fname: {
"content": new_content
}
})
logger.info('Done.') | [((487, 526), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (506, 526), False, 'import logging\n'), ((536, 563), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (553, 563), False, 'import logging\n'), ((916, 978), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)', 'indent': '(2)', 'sort_keys': '(True)'}), '(data, ensure_ascii=False, indent=2, sort_keys=True)\n', (926, 978), False, 'import json\n'), ((1022, 1043), 'github3.login', 'login', ([], {'token': 'gh_token'}), '(token=gh_token)\n', (1027, 1043), False, 'from github3 import login\n'), ((595, 643), 'firebase.firebase.FirebaseApplication', 'firebase.FirebaseApplication', (['firebase_url', 'None'], {}), '(firebase_url, None)\n', (623, 643), False, 'from firebase import firebase\n'), ((665, 744), 'firebase.firebase.FirebaseAuthentication', 'firebase.FirebaseAuthentication', (['firebase_secret', 'firebase_username'], {'admin': '(True)'}), '(firebase_secret, firebase_username, admin=True)\n', (696, 744), False, 'from firebase import firebase\n')] |
victorWeiFreelancer/CodeJam | practice/2008/qualification/C-Fly_swatter/c.py | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | import sys
sys.dont_write_bytecode = True
def hitP(f, R, t, r, g):
if f>=g/2 :
return 0.0
missArea = 0.0
gridL = g+2*r
nGrids = (R - t) // gridL
missGridSideLength = g - 2*f
print("gridL %.12f; nGrids %d" %(gridL, nGrids) )
indentSquareLength = nGrids*gridL
remain = (R - t) - indentSquareLength
missArea += (nGrids * missGridSideLength)**2
remainMissArea = 0
    # NOTE: the two conditionals below were left unfinished in the original
    # source (no colon / no body); "pass" bodies are added so the file parses.
    if remain - 2*r > 2*f:
        pass  # TODO: account for the partially covered last row/column of gaps
    if remain > g+r:
        pass  # TODO: account for a full extra gap in the leftover strip
    totalArea = R**2 / 4.0
    # "missR" was undefined in the original print; report missArea instead.
    print("missed a %.12f, total area %.12f" % (missArea, (R-t)**2))
return (totalArea - missArea) / (R-t)**2
def main():
numTestCases = int(input())
for i in range(numTestCases):
f, R, t, r, g = list(map(float, input().split()))
p = hitP(f, R, t, r, g)
print( "Case #%d: %.6f" %(i+1, p))
if __name__ == '__main__':
main() | [] |
rkfg/synapse | synapse/notifier.py | 0b3112123da5fae4964db784e3bab0c4d83d9d62 | # -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import (
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
import attr
from prometheus_client import Counter
from twisted.internet import defer
import synapse.server
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.errors import AuthError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import PreserveLoggingContext
from synapse.logging.opentracing import log_kv, start_active_span
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.streams.config import PaginationConfig
from synapse.types import (
Collection,
PersistedEventPosition,
RoomStreamToken,
StreamToken,
UserID,
)
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
notified_events_counter = Counter("synapse_notifier_notified_events", "")
users_woken_by_stream_counter = Counter(
"synapse_notifier_users_woken_by_stream", "", ["stream"]
)
T = TypeVar("T")
# TODO(paul): Should be shared somewhere
def count(func: Callable[[T], bool], it: Iterable[T]) -> int:
"""Return the number of items in it for which func returns true."""
n = 0
for x in it:
if func(x):
n += 1
return n
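# For example, count(lambda x: x > 1, [0, 1, 2, 3]) == 2.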
class _NotificationListener:
"""This represents a single client connection to the events stream.
The events stream handler will have yielded to the deferred, so to
notify the handler it is sufficient to resolve the deferred.
"""
__slots__ = ["deferred"]
def __init__(self, deferred):
self.deferred = deferred
class _NotifierUserStream:
"""This represents a user connected to the event stream.
It tracks the most recent stream token for that user.
At a given point a user may have a number of streams listening for
events.
This listener will also keep track of which rooms it is listening in
so that it can remove itself from the indexes in the Notifier class.
"""
def __init__(
self,
user_id: str,
rooms: Collection[str],
current_token: StreamToken,
time_now_ms: int,
):
self.user_id = user_id
self.rooms = set(rooms)
self.current_token = current_token
# The last token for which we should wake up any streams that have a
# token that comes before it. This gets updated every time we get poked.
# We start it at the current token since if we get any streams
# that have a token from before we have no idea whether they should be
# woken up or not, so lets just wake them up.
self.last_notified_token = current_token
self.last_notified_ms = time_now_ms
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
def notify(
self,
stream_key: str,
stream_id: Union[int, RoomStreamToken],
time_now_ms: int,
):
"""Notify any listeners for this user of a new event from an
event source.
Args:
stream_key: The stream the event came from.
stream_id: The new id for the stream the event came from.
time_now_ms: The current time in milliseconds.
"""
self.current_token = self.current_token.copy_and_advance(stream_key, stream_id)
self.last_notified_token = self.current_token
self.last_notified_ms = time_now_ms
noify_deferred = self.notify_deferred
log_kv(
{
"notify": self.user_id,
"stream": stream_key,
"stream_id": stream_id,
"listeners": self.count_listeners(),
}
)
users_woken_by_stream_counter.labels(stream_key).inc()
with PreserveLoggingContext():
self.notify_deferred = ObservableDeferred(defer.Deferred())
noify_deferred.callback(self.current_token)
def remove(self, notifier: "Notifier"):
"""Remove this listener from all the indexes in the Notifier
it knows about.
"""
for room in self.rooms:
lst = notifier.room_to_user_streams.get(room, set())
lst.discard(self)
notifier.user_to_user_stream.pop(self.user_id)
def count_listeners(self) -> int:
return len(self.notify_deferred.observers())
def new_listener(self, token: StreamToken) -> _NotificationListener:
"""Returns a deferred that is resolved when there is a new token
greater than the given token.
Args:
token: The token from which we are streaming from, i.e. we shouldn't
notify for things that happened before this.
"""
# Immediately wake up stream if something has already since happened
# since their last token.
if self.last_notified_token != token:
return _NotificationListener(defer.succeed(self.current_token))
else:
return _NotificationListener(self.notify_deferred.observe())
class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))):
def __bool__(self):
return bool(self.events)
@attr.s(slots=True, frozen=True)
class _PendingRoomEventEntry:
event_pos = attr.ib(type=PersistedEventPosition)
extra_users = attr.ib(type=Collection[UserID])
room_id = attr.ib(type=str)
type = attr.ib(type=str)
state_key = attr.ib(type=Optional[str])
membership = attr.ib(type=Optional[str])
class Notifier:
"""This class is responsible for notifying any listeners when there are
new events available for it.
Primarily used from the /events stream.
"""
UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
def __init__(self, hs: "synapse.server.HomeServer"):
self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream]
self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]]
self.hs = hs
self.storage = hs.get_storage()
self.event_sources = hs.get_event_sources()
self.store = hs.get_datastore()
self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry]
# Called when there are new things to stream over replication
self.replication_callbacks = [] # type: List[Callable[[], None]]
# Called when remote servers have come back online after having been
# down.
self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]]
self.clock = hs.get_clock()
self.appservice_handler = hs.get_application_service_handler()
self._pusher_pool = hs.get_pusherpool()
self.federation_sender = None
if hs.should_send_federation():
self.federation_sender = hs.get_federation_sender()
self.state_handler = hs.get_state_handler()
self.clock.looping_call(
self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
)
# This is not a very cheap test to perform, but it's only executed
# when rendering the metrics page, which is likely once per minute at
# most when scraping it.
def count_listeners():
all_user_streams = set() # type: Set[_NotifierUserStream]
for streams in list(self.room_to_user_streams.values()):
all_user_streams |= streams
for stream in list(self.user_to_user_stream.values()):
all_user_streams.add(stream)
return sum(stream.count_listeners() for stream in all_user_streams)
LaterGauge("synapse_notifier_listeners", "", [], count_listeners)
LaterGauge(
"synapse_notifier_rooms",
"",
[],
lambda: count(bool, list(self.room_to_user_streams.values())),
)
LaterGauge(
"synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream)
)
def add_replication_callback(self, cb: Callable[[], None]):
"""Add a callback that will be called when some new data is available.
Callback is not given any arguments. It should *not* return a Deferred - if
it needs to do any asynchronous work, a background thread should be started and
wrapped with run_as_background_process.
"""
self.replication_callbacks.append(cb)
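    # A sketch of a conforming callback (illustrative; the names are placeholders,
    # and run_as_background_process is assumed to be the usual helper from
    # synapse.metrics.background_process_metrics):
    #
    #     def _on_replication_notification() -> None:
    #         run_as_background_process("notify_replication", _do_async_work)
    #
    #     notifier.add_replication_callback(_on_replication_notification)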
def on_new_room_event(
self,
event: EventBase,
event_pos: PersistedEventPosition,
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
):
"""Unwraps event and calls `on_new_room_event_args`."""
self.on_new_room_event_args(
event_pos=event_pos,
room_id=event.room_id,
event_type=event.type,
state_key=event.get("state_key"),
membership=event.content.get("membership"),
max_room_stream_token=max_room_stream_token,
extra_users=extra_users or [],
)
def on_new_room_event_args(
self,
room_id: str,
event_type: str,
state_key: Optional[str],
membership: Optional[str],
event_pos: PersistedEventPosition,
max_room_stream_token: RoomStreamToken,
extra_users: Optional[Collection[UserID]] = None,
):
"""Used by handlers to inform the notifier something has happened
in the room, room event wise.
This triggers the notifier to wake up any listeners that are
listening to the room, and any listeners for the users in the
`extra_users` param.
        The events can be persisted out of order. The notifier will wait
until all previous events have been persisted before notifying
the client streams.
"""
self.pending_new_room_events.append(
_PendingRoomEventEntry(
event_pos=event_pos,
extra_users=extra_users or [],
room_id=room_id,
type=event_type,
state_key=state_key,
membership=membership,
)
)
self._notify_pending_new_room_events(max_room_stream_token)
self.notify_replication()
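    # Illustrative call from an event-persistence handler (all names other than the
    # method itself are placeholders):
    #
    #     notifier.on_new_room_event(
    #         event,                     # the EventBase that was just persisted
    #         event_pos,                 # its PersistedEventPosition
    #         max_room_stream_token,     # highest position fully persisted so far
    #         extra_users=[invited_user],
    #     )
    #
    # The entry is queued until all earlier positions have been persisted, after
    # which the streams for the room and for `extra_users` are woken.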
def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken):
"""Notify for the room events that were queued waiting for a previous
event to be persisted.
Args:
max_room_stream_token: The highest stream_id below which all
events have been persisted.
"""
pending = self.pending_new_room_events
self.pending_new_room_events = []
users = set() # type: Set[UserID]
rooms = set() # type: Set[str]
for entry in pending:
if entry.event_pos.persisted_after(max_room_stream_token):
self.pending_new_room_events.append(entry)
else:
if (
entry.type == EventTypes.Member
and entry.membership == Membership.JOIN
and entry.state_key
):
self._user_joined_room(entry.state_key, entry.room_id)
users.update(entry.extra_users)
rooms.add(entry.room_id)
if users or rooms:
self.on_new_event(
"room_key",
max_room_stream_token,
users=users,
rooms=rooms,
)
self._on_updated_room_token(max_room_stream_token)
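    # Worked example of the ordering guarantee: if the event at stream position 6
    # is persisted while position 5 is still in flight, its entry is kept in
    # pending_new_room_events (persisted_after() returns True for it) and no
    # wake-up is sent. Once position 5 lands and max_room_stream_token covers 6,
    # both entries are drained here with a single "room_key" notification.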
def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken):
"""Poke services that might care that the room position has been
updated.
"""
# poke any interested application service.
self._notify_app_services(max_room_stream_token)
self._notify_pusher_pool(max_room_stream_token)
if self.federation_sender:
self.federation_sender.notify_new_events(max_room_stream_token)
def _notify_app_services(self, max_room_stream_token: RoomStreamToken):
try:
self.appservice_handler.notify_interested_services(max_room_stream_token)
except Exception:
logger.exception("Error notifying application services of event")
def _notify_app_services_ephemeral(
self,
stream_key: str,
new_token: Union[int, RoomStreamToken],
users: Optional[Collection[Union[str, UserID]]] = None,
):
try:
stream_token = None
if isinstance(new_token, int):
stream_token = new_token
self.appservice_handler.notify_interested_services_ephemeral(
stream_key, stream_token, users or []
)
except Exception:
logger.exception("Error notifying application services of event")
def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken):
try:
self._pusher_pool.on_new_notifications(max_room_stream_token)
except Exception:
logger.exception("Error pusher pool of event")
def on_new_event(
self,
stream_key: str,
new_token: Union[int, RoomStreamToken],
users: Optional[Collection[Union[str, UserID]]] = None,
rooms: Optional[Collection[str]] = None,
):
"""Used to inform listeners that something has happened event wise.
Will wake up all listeners for the given users and rooms.
"""
users = users or []
rooms = rooms or []
with Measure(self.clock, "on_new_event"):
user_streams = set()
log_kv(
{
"waking_up_explicit_users": len(users),
"waking_up_explicit_rooms": len(rooms),
}
)
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
user_streams.add(user_stream)
for room in rooms:
user_streams |= self.room_to_user_streams.get(room, set())
time_now_ms = self.clock.time_msec()
for user_stream in user_streams:
try:
user_stream.notify(stream_key, new_token, time_now_ms)
except Exception:
logger.exception("Failed to notify listener")
self.notify_replication()
# Notify appservices
self._notify_app_services_ephemeral(
stream_key,
new_token,
users,
)
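    # Illustrative wake-up for a non-room stream (the stream key is a placeholder;
    # keys follow the "<name>_key" convention of StreamToken, as used in
    # get_events_for below):
    #
    #     notifier.on_new_event("receipt_key", new_stream_id, rooms=[room_id])
    #
    # Only the streams registered for the given users/rooms are woken; everything
    # else only sees the replication notification.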
def on_new_replication_data(self) -> None:
"""Used to inform replication listeners that something has happened
without waking up any of the normal user event streams"""
self.notify_replication()
async def wait_for_events(
self,
user_id: str,
timeout: int,
callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
room_ids=None,
from_token=StreamToken.START,
) -> T:
"""Wait until the callback returns a non empty response or the
timeout fires.
"""
user_stream = self.user_to_user_stream.get(user_id)
if user_stream is None:
current_token = self.event_sources.get_current_token()
if room_ids is None:
room_ids = await self.store.get_rooms_for_user(user_id)
user_stream = _NotifierUserStream(
user_id=user_id,
rooms=room_ids,
current_token=current_token,
time_now_ms=self.clock.time_msec(),
)
self._register_with_keys(user_stream)
result = None
prev_token = from_token
if timeout:
end_time = self.clock.time_msec() + timeout
while not result:
try:
now = self.clock.time_msec()
if end_time <= now:
break
# Now we wait for the _NotifierUserStream to be told there
# is a new token.
listener = user_stream.new_listener(prev_token)
listener.deferred = timeout_deferred(
listener.deferred,
(end_time - now) / 1000.0,
self.hs.get_reactor(),
)
with start_active_span("wait_for_events.deferred"):
log_kv(
{
"wait_for_events": "sleep",
"token": prev_token,
}
)
with PreserveLoggingContext():
await listener.deferred
log_kv(
{
"wait_for_events": "woken",
"token": user_stream.current_token,
}
)
current_token = user_stream.current_token
result = await callback(prev_token, current_token)
log_kv(
{
"wait_for_events": "result",
"result": bool(result),
}
)
if result:
break
# Update the prev_token to the current_token since nothing
# has happened between the old prev_token and the current_token
prev_token = current_token
except defer.TimeoutError:
log_kv({"wait_for_events": "timeout"})
break
except defer.CancelledError:
log_kv({"wait_for_events": "cancelled"})
break
if result is None:
            # This happens if there was no timeout or if the timeout had
            # already expired.
current_token = user_stream.current_token
result = await callback(prev_token, current_token)
return result
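    # Minimal caller sketch (illustrative; get_events_for below is the in-tree
    # usage, and fetch_anything_new is a made-up helper):
    #
    #     async def check(before: StreamToken, after: StreamToken):
    #         return await fetch_anything_new(before, after)
    #
    #     result = await notifier.wait_for_events(user_id, timeout_ms, check)
    #
    # The callback is re-run each time the user's stream is woken, until it returns
    # something truthy or the timeout expires.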
async def get_events_for(
self,
user: UserID,
pagination_config: PaginationConfig,
timeout: int,
is_guest: bool = False,
explicit_room_id: Optional[str] = None,
) -> EventStreamResult:
"""For the given user and rooms, return any new events for them. If
there are no new events wait for up to `timeout` milliseconds for any
new events to happen before returning.
If explicit_room_id is not set, the user's joined rooms will be polled
for events.
If explicit_room_id is set, that room will be polled for events only if
it is world readable or the user has joined the room.
"""
if pagination_config.from_token:
from_token = pagination_config.from_token
else:
from_token = self.event_sources.get_current_token()
limit = pagination_config.limit
room_ids, is_joined = await self._get_room_ids(user, explicit_room_id)
is_peeking = not is_joined
async def check_for_updates(
before_token: StreamToken, after_token: StreamToken
) -> EventStreamResult:
if after_token == before_token:
return EventStreamResult([], (from_token, from_token))
events = [] # type: List[EventBase]
end_token = from_token
for name, source in self.event_sources.sources.items():
keyname = "%s_key" % name
before_id = getattr(before_token, keyname)
after_id = getattr(after_token, keyname)
if before_id == after_id:
continue
new_events, new_key = await source.get_new_events(
user=user,
from_key=getattr(from_token, keyname),
limit=limit,
is_guest=is_peeking,
room_ids=room_ids,
explicit_room_id=explicit_room_id,
)
if name == "room":
new_events = await filter_events_for_client(
self.storage,
user.to_string(),
new_events,
is_peeking=is_peeking,
)
elif name == "presence":
now = self.clock.time_msec()
new_events[:] = [
{
"type": "m.presence",
"content": format_user_presence_state(event, now),
}
for event in new_events
]
events.extend(new_events)
end_token = end_token.copy_and_replace(keyname, new_key)
return EventStreamResult(events, (from_token, end_token))
user_id_for_stream = user.to_string()
if is_peeking:
# Internally, the notifier keeps an event stream per user_id.
# This is used by both /sync and /events.
# We want /events to be used for peeking independently of /sync,
# without polluting its contents. So we invent an illegal user ID
# (which thus cannot clash with any real users) for keying peeking
# over /events.
#
# I am sorry for what I have done.
user_id_for_stream = "_PEEKING_%s_%s" % (
explicit_room_id,
user_id_for_stream,
)
result = await self.wait_for_events(
user_id_for_stream,
timeout,
check_for_updates,
room_ids=room_ids,
from_token=from_token,
)
return result
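    # Illustrative caller, e.g. an /events-style request handler (names are
    # placeholders):
    #
    #     chunk = await notifier.get_events_for(requester.user, pagination_config, 30000)
    #     if chunk:  # EventStreamResult is falsy when it carries no events
    #         from_token, end_token = chunk.tokens
    #         ...  # serialize chunk.events between the two tokens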
async def _get_room_ids(
self, user: UserID, explicit_room_id: Optional[str]
) -> Tuple[Collection[str], bool]:
joined_room_ids = await self.store.get_rooms_for_user(user.to_string())
if explicit_room_id:
if explicit_room_id in joined_room_ids:
return [explicit_room_id], True
if await self._is_world_readable(explicit_room_id):
return [explicit_room_id], False
raise AuthError(403, "Non-joined access not allowed")
return joined_room_ids, True
async def _is_world_readable(self, room_id: str) -> bool:
state = await self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if state and "history_visibility" in state.content:
return (
state.content["history_visibility"] == HistoryVisibility.WORLD_READABLE
)
else:
return False
@log_function
def remove_expired_streams(self) -> None:
time_now_ms = self.clock.time_msec()
expired_streams = []
expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS
for stream in self.user_to_user_stream.values():
if stream.count_listeners():
continue
if stream.last_notified_ms < expire_before_ts:
expired_streams.append(stream)
for expired_stream in expired_streams:
expired_stream.remove(self)
@log_function
def _register_with_keys(self, user_stream: _NotifierUserStream):
self.user_to_user_stream[user_stream.user_id] = user_stream
for room in user_stream.rooms:
s = self.room_to_user_streams.setdefault(room, set())
s.add(user_stream)
def _user_joined_room(self, user_id: str, room_id: str):
new_user_stream = self.user_to_user_stream.get(user_id)
if new_user_stream is not None:
room_streams = self.room_to_user_streams.setdefault(room_id, set())
room_streams.add(new_user_stream)
new_user_stream.rooms.add(room_id)
def notify_replication(self) -> None:
"""Notify the any replication listeners that there's a new event"""
for cb in self.replication_callbacks:
cb()
def notify_remote_server_up(self, server: str):
"""Notify any replication that a remote server has come back up"""
# We call federation_sender directly rather than registering as a
# callback as a) we already have a reference to it and b) it introduces
# circular dependencies.
if self.federation_sender:
self.federation_sender.wake_destination(server)
| [((1703, 1730), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1720, 1730), False, 'import logging\n'), ((1758, 1805), 'prometheus_client.Counter', 'Counter', (['"""synapse_notifier_notified_events"""', '""""""'], {}), "('synapse_notifier_notified_events', '')\n", (1765, 1805), False, 'from prometheus_client import Counter\n'), ((1839, 1904), 'prometheus_client.Counter', 'Counter', (['"""synapse_notifier_users_woken_by_stream"""', '""""""', "['stream']"], {}), "('synapse_notifier_users_woken_by_stream', '', ['stream'])\n", (1846, 1904), False, 'from prometheus_client import Counter\n'), ((1916, 1928), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1923, 1928), False, 'from typing import Awaitable, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union\n'), ((6001, 6054), 'collections.namedtuple', 'namedtuple', (['"""EventStreamResult"""', "('events', 'tokens')"], {}), "('EventStreamResult', ('events', 'tokens'))\n", (6011, 6054), False, 'from collections import namedtuple\n'), ((6117, 6148), 'attr.s', 'attr.s', ([], {'slots': '(True)', 'frozen': '(True)'}), '(slots=True, frozen=True)\n', (6123, 6148), False, 'import attr\n'), ((6195, 6231), 'attr.ib', 'attr.ib', ([], {'type': 'PersistedEventPosition'}), '(type=PersistedEventPosition)\n', (6202, 6231), False, 'import attr\n'), ((6250, 6282), 'attr.ib', 'attr.ib', ([], {'type': 'Collection[UserID]'}), '(type=Collection[UserID])\n', (6257, 6282), False, 'import attr\n'), ((6298, 6315), 'attr.ib', 'attr.ib', ([], {'type': 'str'}), '(type=str)\n', (6305, 6315), False, 'import attr\n'), ((6327, 6344), 'attr.ib', 'attr.ib', ([], {'type': 'str'}), '(type=str)\n', (6334, 6344), False, 'import attr\n'), ((6361, 6388), 'attr.ib', 'attr.ib', ([], {'type': 'Optional[str]'}), '(type=Optional[str])\n', (6368, 6388), False, 'import attr\n'), ((6406, 6433), 'attr.ib', 'attr.ib', ([], {'type': 'Optional[str]'}), '(type=Optional[str])\n', (6413, 6433), False, 'import attr\n'), ((8506, 8571), 'synapse.metrics.LaterGauge', 'LaterGauge', (['"""synapse_notifier_listeners"""', '""""""', '[]', 'count_listeners'], {}), "('synapse_notifier_listeners', '', [], count_listeners)\n", (8516, 8571), False, 'from synapse.metrics import LaterGauge\n'), ((3648, 3672), 'synapse.logging.context.PreserveLoggingContext', 'PreserveLoggingContext', ([], {}), '()\n', (3670, 3672), False, 'from synapse.logging.context import PreserveLoggingContext\n'), ((4721, 4745), 'synapse.logging.context.PreserveLoggingContext', 'PreserveLoggingContext', ([], {}), '()\n', (4743, 4745), False, 'from synapse.logging.context import PreserveLoggingContext\n'), ((14467, 14502), 'synapse.util.metrics.Measure', 'Measure', (['self.clock', '"""on_new_event"""'], {}), "(self.clock, 'on_new_event')\n", (14474, 14502), False, 'from synapse.util.metrics import Measure\n'), ((23366, 23413), 'synapse.api.errors.AuthError', 'AuthError', (['(403)', '"""Non-joined access not allowed"""'], {}), "(403, 'Non-joined access not allowed')\n", (23375, 23413), False, 'from synapse.api.errors import AuthError\n'), ((3728, 3744), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (3742, 3744), False, 'from twisted.internet import defer\n'), ((4801, 4817), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (4815, 4817), False, 'from twisted.internet import defer\n'), ((5853, 5886), 'twisted.internet.defer.succeed', 'defer.succeed', (['self.current_token'], {}), '(self.current_token)\n', (5866, 5886), False, 'from 
twisted.internet import defer\n'), ((17371, 17416), 'synapse.logging.opentracing.start_active_span', 'start_active_span', (['"""wait_for_events.deferred"""'], {}), "('wait_for_events.deferred')\n", (17388, 17416), False, 'from synapse.logging.opentracing import log_kv, start_active_span\n'), ((17442, 17499), 'synapse.logging.opentracing.log_kv', 'log_kv', (["{'wait_for_events': 'sleep', 'token': prev_token}"], {}), "({'wait_for_events': 'sleep', 'token': prev_token})\n", (17448, 17499), False, 'from synapse.logging.opentracing import log_kv, start_active_span\n'), ((17782, 17854), 'synapse.logging.opentracing.log_kv', 'log_kv', (["{'wait_for_events': 'woken', 'token': user_stream.current_token}"], {}), "({'wait_for_events': 'woken', 'token': user_stream.current_token})\n", (17788, 17854), False, 'from synapse.logging.opentracing import log_kv, start_active_span\n'), ((18685, 18723), 'synapse.logging.opentracing.log_kv', 'log_kv', (["{'wait_for_events': 'timeout'}"], {}), "({'wait_for_events': 'timeout'})\n", (18691, 18723), False, 'from synapse.logging.opentracing import log_kv, start_active_span\n'), ((18815, 18855), 'synapse.logging.opentracing.log_kv', 'log_kv', (["{'wait_for_events': 'cancelled'}"], {}), "({'wait_for_events': 'cancelled'})\n", (18821, 18855), False, 'from synapse.logging.opentracing import log_kv, start_active_span\n'), ((17679, 17703), 'synapse.logging.context.PreserveLoggingContext', 'PreserveLoggingContext', ([], {}), '()\n', (17701, 17703), False, 'from synapse.logging.context import PreserveLoggingContext\n'), ((21686, 21724), 'synapse.handlers.presence.format_user_presence_state', 'format_user_presence_state', (['event', 'now'], {}), '(event, now)\n', (21712, 21724), False, 'from synapse.handlers.presence import format_user_presence_state\n')] |
nestfiy/saleor | saleor/checkout/tests/test_base_calculations.py | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | from decimal import Decimal
from prices import Money, TaxedMoney
from ...discount import DiscountValueType, VoucherType
from ...discount.utils import get_product_discount_on_sale
from ..base_calculations import (
base_checkout_total,
base_tax_rate,
calculate_base_line_total_price,
calculate_base_line_unit_price,
)
from ..fetch import fetch_checkout_lines
def test_calculate_base_line_unit_price(checkout_with_single_item):
# given
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item):
# given
line = checkout_with_single_item.lines.first()
price_override = Decimal("12.22")
line.price_override = price_override
line.save(update_fields=["price_override"])
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
currency = checkout_line_info.channel_listing.currency
expected_price = Money(price_override, currency)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_variant_on_sale(
checkout_with_single_item, discount_info, category
):
# given
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
expected_undiscounted_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
expected_price = sale_discount(expected_undiscounted_price)
assert prices_data.undiscounted_price == expected_undiscounted_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price(
checkout_with_single_item, discount_info, category
):
# given
line = checkout_with_single_item.lines.first()
price_override = Decimal("20.00")
line.price_override = price_override
line.save(update_fields=["price_override"])
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
currency = checkout_line_info.channel_listing.currency
expected_undiscounted_price = Money(price_override, currency)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
expected_price = sale_discount(expected_undiscounted_price)
assert prices_data.undiscounted_price == expected_undiscounted_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_fixed_voucher(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price - voucher_amount
def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
price_override = Decimal("20.00")
checkout_line.price_override = price_override
checkout_line.save(update_fields=["price_override"])
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
currency = checkout_line_info.channel_listing.currency
expected_price = Money(price_override, currency)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price - voucher_amount
def test_calculate_base_line_unit_price_with_percentage_voucher(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency)
expected_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price - expected_voucher_amount
def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
price_override = Decimal("20.00")
checkout_line.price_override = price_override
checkout_line.save(update_fields=["price_override"])
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
currency = checkout_line_info.channel_listing.currency
expected_price = Money(price_override, currency)
expected_voucher_amount = Money(
price_override * voucher_percent_value / 100, checkout_with_single_item.currency
)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price - expected_voucher_amount
def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.apply_once_per_order = True
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
    # The apply-once-per-order discount is applied when calculating the line total.
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices(
checkout_with_single_item, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
price_override = Decimal("20.00")
checkout_line.price_override = price_override
checkout_line.save(update_fields=["price_override"])
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.apply_once_per_order = True
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
currency = checkout_line_info.channel_listing.currency
expected_price = Money(price_override, currency)
assert prices_data.undiscounted_price == expected_price
assert prices_data.price_with_sale == expected_price
    # The apply-once-per-order discount is applied when calculating the line total.
assert prices_data.price_with_discounts == expected_price
def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher(
checkout_with_single_item, discount_info, category, voucher, channel_USD
):
# given
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_unit_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
expected_undiscounted_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
sale_discount_amount = sale_discount(expected_undiscounted_price)
expected_price = expected_undiscounted_price - sale_discount_amount
assert prices_data.undiscounted_price == expected_undiscounted_price
assert prices_data.price_with_sale == expected_price
assert prices_data.price_with_discounts == expected_price - voucher_amount
def test_calculate_base_line_total_price(checkout_with_single_item):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_price * quantity
assert prices_data.price_with_sale == expected_price * quantity
assert prices_data.price_with_discounts == expected_price * quantity
def test_calculate_base_line_total_price_with_variant_on_sale(
checkout_with_single_item, discount_info, category
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert not checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
expected_undiscounted_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
sale_discount_amount = sale_discount(expected_undiscounted_unit_price)
expected_price = expected_undiscounted_unit_price - sale_discount_amount
assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity
assert prices_data.price_with_sale == expected_price * quantity
assert prices_data.price_with_discounts == expected_price * quantity
def test_calculate_base_line_total_price_with_fixed_voucher(
checkout_with_single_item, voucher, channel_USD
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_unit_price * quantity
assert prices_data.price_with_sale == expected_unit_price * quantity
assert (
prices_data.price_with_discounts
== (expected_unit_price - voucher_amount) * quantity
)
def test_calculate_base_line_total_price_with_percentage_voucher(
checkout_with_single_item, voucher, channel_USD
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency)
expected_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_unit_price * quantity
assert prices_data.price_with_sale == expected_unit_price * quantity
assert (
prices_data.price_with_discounts
== (expected_unit_price - expected_voucher_amount) * quantity
)
def test_calculate_base_line_total_price_with_discounts_apply_once_per_order(
checkout_with_single_item, voucher, channel_USD
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.apply_once_per_order = True
voucher.discount_value_type = DiscountValueType.PERCENTAGE
voucher.save()
voucher_percent_value = Decimal(10)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount_value = voucher_percent_value
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[]
)
# then
expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency)
expected_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
assert prices_data.undiscounted_price == expected_unit_price * quantity
assert prices_data.price_with_sale == expected_unit_price * quantity
    # The apply-once-per-order discount is applied when calculating the line total.
assert (
prices_data.price_with_discounts
== (expected_unit_price * quantity) - expected_voucher_amount
)
def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher(
checkout_with_single_item, discount_info, category, voucher, channel_USD
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
expected_undiscounted_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
sale_discount_amount = sale_discount(expected_undiscounted_unit_price)
expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount
assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity
assert prices_data.price_with_sale == expected_unit_price * quantity
assert (
prices_data.price_with_discounts
== (expected_unit_price - voucher_amount) * quantity
)
def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once(
checkout_with_single_item, discount_info, category, voucher, channel_USD
):
# given
quantity = 3
checkout_line = checkout_with_single_item.lines.first()
checkout_line.quantity = quantity
checkout_line.save()
checkout_line = checkout_with_single_item.lines.first()
voucher.products.add(checkout_line.variant.product)
voucher.type = VoucherType.SPECIFIC_PRODUCT
voucher.apply_once_per_order = True
voucher.save()
voucher_amount = Money(Decimal(3), checkout_with_single_item.currency)
voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD)
voucher_channel_listing.discount = voucher_amount
voucher_channel_listing.save()
checkout_with_single_item.voucher_code = voucher.code
checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item)
checkout_line_info = checkout_lines_info[0]
assert checkout_line_info.voucher
variant = checkout_line_info.variant
# set category on sale
variant.product.category = category
variant.product.save()
checkout_line_info.product = variant.product
# when
prices_data = calculate_base_line_total_price(
checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info]
)
# then
expected_undiscounted_unit_price = variant.get_price(
product=checkout_line_info.product,
collections=checkout_line_info.collections,
channel=checkout_with_single_item.channel,
channel_listing=checkout_line_info.channel_listing,
discounts=[],
)
product_collections = set(pc.id for pc in checkout_line_info.collections)
_, sale_discount = get_product_discount_on_sale(
product=checkout_line_info.product,
product_collections=product_collections,
discount=discount_info,
channel=checkout_with_single_item.channel,
variant_id=variant.id,
)
sale_discount_amount = sale_discount(expected_undiscounted_unit_price)
expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount
assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity
assert prices_data.price_with_sale == expected_unit_price * quantity
assert (
prices_data.price_with_discounts
== (expected_unit_price * quantity) - voucher_amount
)
def test_base_tax_rate_net_price_zero():
price = TaxedMoney(net=Money(0, "USD"), gross=Money(3, "USD"))
assert base_tax_rate(price) == Decimal("0.0")
def test_base_tax_rate_gross_price_zero():
price = TaxedMoney(net=Money(3, "USD"), gross=Money(0, "USD"))
assert base_tax_rate(price) == Decimal("0.0")
def test_base_checkout_total():
# given
currency = "USD"
taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency))
subtotal = taxed_money
shipping_price = taxed_money
discount = Money(5, currency)
# when
total = base_checkout_total(subtotal, shipping_price, discount, currency)
expected = subtotal + shipping_price - discount
# then
assert total == expected
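# Quick arithmetic check of the helper under test, using the values above:
# total = subtotal + shipping_price - discount = 10 + 10 - 5 = 15 USD.
# The high-discount test below shows the result is floored at zero instead of
# going negative.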
def test_base_checkout_total_high_discount():
# given
currency = "USD"
zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency))
subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency))
shipping_price = zero_taxed_money
discount = Money(20, currency)
# when
total = base_checkout_total(subtotal, shipping_price, discount, currency)
# then
assert total == zero_taxed_money
| [((1447, 1463), 'decimal.Decimal', 'Decimal', (['"""12.22"""'], {}), "('12.22')\n", (1454, 1463), False, 'from decimal import Decimal\n'), ((1957, 1988), 'prices.Money', 'Money', (['price_override', 'currency'], {}), '(price_override, currency)\n', (1962, 1988), False, 'from prices import Money, TaxedMoney\n'), ((3930, 3946), 'decimal.Decimal', 'Decimal', (['"""20.00"""'], {}), "('20.00')\n", (3937, 3946), False, 'from decimal import Decimal\n'), ((4650, 4681), 'prices.Money', 'Money', (['price_override', 'currency'], {}), '(price_override, currency)\n', (4655, 4681), False, 'from prices import Money, TaxedMoney\n'), ((6955, 6971), 'decimal.Decimal', 'Decimal', (['"""20.00"""'], {}), "('20.00')\n", (6962, 6971), False, 'from decimal import Decimal\n'), ((7906, 7937), 'prices.Money', 'Money', (['price_override', 'currency'], {}), '(price_override, currency)\n', (7911, 7937), False, 'from prices import Money, TaxedMoney\n'), ((8544, 8555), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (8551, 8555), False, 'from decimal import Decimal\n'), ((9952, 9968), 'decimal.Decimal', 'Decimal', (['"""20.00"""'], {}), "('20.00')\n", (9959, 9968), False, 'from decimal import Decimal\n'), ((10292, 10303), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (10299, 10303), False, 'from decimal import Decimal\n'), ((10944, 10975), 'prices.Money', 'Money', (['price_override', 'currency'], {}), '(price_override, currency)\n', (10949, 10975), False, 'from prices import Money, TaxedMoney\n'), ((11006, 11097), 'prices.Money', 'Money', (['(price_override * voucher_percent_value / 100)', 'checkout_with_single_item.currency'], {}), '(price_override * voucher_percent_value / 100,\n checkout_with_single_item.currency)\n', (11011, 11097), False, 'from prices import Money, TaxedMoney\n'), ((11775, 11786), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (11782, 11786), False, 'from decimal import Decimal\n'), ((13144, 13160), 'decimal.Decimal', 'Decimal', (['"""20.00"""'], {}), "('20.00')\n", (13151, 13160), False, 'from decimal import Decimal\n'), ((13524, 13535), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (13531, 13535), False, 'from decimal import Decimal\n'), ((14176, 14207), 'prices.Money', 'Money', (['price_override', 'currency'], {}), '(price_override, currency)\n', (14181, 14207), False, 'from prices import Money, TaxedMoney\n'), ((21723, 21734), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (21730, 21734), False, 'from decimal import Decimal\n'), ((23586, 23597), 'decimal.Decimal', 'Decimal', (['(10)'], {}), '(10)\n', (23593, 23597), False, 'from decimal import Decimal\n'), ((30298, 30316), 'prices.Money', 'Money', (['(5)', 'currency'], {}), '(5, currency)\n', (30303, 30316), False, 'from prices import Money, TaxedMoney\n'), ((30796, 30815), 'prices.Money', 'Money', (['(20)', 'currency'], {}), '(20, currency)\n', (30801, 30815), False, 'from prices import Money, TaxedMoney\n'), ((5624, 5634), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (5631, 5634), False, 'from decimal import Decimal\n'), ((7231, 7241), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (7238, 7241), False, 'from decimal import Decimal\n'), ((9193, 9205), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (9200, 9205), False, 'from decimal import Decimal\n'), ((14832, 14842), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (14839, 14842), False, 'from decimal import Decimal\n'), ((19984, 19994), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (19991, 19994), False, 
'from decimal import Decimal\n'), ((22373, 22385), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (22380, 22385), False, 'from decimal import Decimal\n'), ((24236, 24248), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (24243, 24248), False, 'from decimal import Decimal\n'), ((25434, 25444), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (25441, 25444), False, 'from decimal import Decimal\n'), ((27881, 27891), 'decimal.Decimal', 'Decimal', (['(3)'], {}), '(3)\n', (27888, 27891), False, 'from decimal import Decimal\n'), ((29898, 29912), 'decimal.Decimal', 'Decimal', (['"""0.0"""'], {}), "('0.0')\n", (29905, 29912), False, 'from decimal import Decimal\n'), ((30060, 30074), 'decimal.Decimal', 'Decimal', (['"""0.0"""'], {}), "('0.0')\n", (30067, 30074), False, 'from decimal import Decimal\n'), ((29823, 29838), 'prices.Money', 'Money', (['(0)', '"""USD"""'], {}), "(0, 'USD')\n", (29828, 29838), False, 'from prices import Money, TaxedMoney\n'), ((29846, 29861), 'prices.Money', 'Money', (['(3)', '"""USD"""'], {}), "(3, 'USD')\n", (29851, 29861), False, 'from prices import Money, TaxedMoney\n'), ((29985, 30000), 'prices.Money', 'Money', (['(3)', '"""USD"""'], {}), "(3, 'USD')\n", (29990, 30000), False, 'from prices import Money, TaxedMoney\n'), ((30008, 30023), 'prices.Money', 'Money', (['(0)', '"""USD"""'], {}), "(0, 'USD')\n", (30013, 30023), False, 'from prices import Money, TaxedMoney\n'), ((30175, 30194), 'prices.Money', 'Money', (['(10)', 'currency'], {}), '(10, currency)\n', (30180, 30194), False, 'from prices import Money, TaxedMoney\n'), ((30202, 30221), 'prices.Money', 'Money', (['(10)', 'currency'], {}), '(10, currency)\n', (30207, 30221), False, 'from prices import Money, TaxedMoney\n'), ((30619, 30637), 'prices.Money', 'Money', (['(0)', 'currency'], {}), '(0, currency)\n', (30624, 30637), False, 'from prices import Money, TaxedMoney\n'), ((30645, 30663), 'prices.Money', 'Money', (['(0)', 'currency'], {}), '(0, currency)\n', (30650, 30663), False, 'from prices import Money, TaxedMoney\n'), ((30695, 30714), 'prices.Money', 'Money', (['(10)', 'currency'], {}), '(10, currency)\n', (30700, 30714), False, 'from prices import Money, TaxedMoney\n'), ((30722, 30741), 'prices.Money', 'Money', (['(12)', 'currency'], {}), '(12, currency)\n', (30727, 30741), False, 'from prices import Money, TaxedMoney\n')] |
andy-z/ged4py | tests/test_date.py | 2270bd8366174dcc98424cc6671bdaecf770fda0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ged4py.date` module."""
import unittest
from ged4py.calendar import (
CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate,
CalendarDateVisitor
)
from ged4py.date import (
DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated,
DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod,
DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes,
DateValueVisitor
)
class TestDateVisitor(CalendarDateVisitor, DateValueVisitor):
def visitGregorian(self, date):
if not isinstance(date, GregorianDate):
raise TypeError(str(type(date)))
return ("gregorian", date)
def visitJulian(self, date):
if not isinstance(date, JulianDate):
raise TypeError(str(type(date)))
return ("julian", date)
def visitHebrew(self, date):
if not isinstance(date, HebrewDate):
raise TypeError(str(type(date)))
return ("hebrew", date)
def visitFrench(self, date):
if not isinstance(date, FrenchDate):
raise TypeError(str(type(date)))
return ("french", date)
def visitSimple(self, date):
if not isinstance(date, DateValueSimple):
raise TypeError(str(type(date)))
return ("simple", date.date)
def visitPeriod(self, date):
if not isinstance(date, DateValuePeriod):
raise TypeError(str(type(date)))
return ("period", date.date1, date.date2)
def visitFrom(self, date):
if not isinstance(date, DateValueFrom):
raise TypeError(str(type(date)))
return ("from", date.date)
def visitTo(self, date):
if not isinstance(date, DateValueTo):
raise TypeError(str(type(date)))
return ("to", date.date)
def visitRange(self, date):
if not isinstance(date, DateValueRange):
raise TypeError(str(type(date)))
return ("range", date.date1, date.date2)
def visitBefore(self, date):
if not isinstance(date, DateValueBefore):
raise TypeError(str(type(date)))
return ("before", date.date)
def visitAfter(self, date):
if not isinstance(date, DateValueAfter):
raise TypeError(str(type(date)))
return ("after", date.date)
def visitAbout(self, date):
if not isinstance(date, DateValueAbout):
raise TypeError(str(type(date)))
return ("about", date.date)
def visitCalculated(self, date):
if not isinstance(date, DateValueCalculated):
raise TypeError(str(type(date)))
return ("calculated", date.date)
def visitEstimated(self, date):
if not isinstance(date, DateValueEstimated):
raise TypeError(str(type(date)))
return ("estimated", date.date)
def visitInterpreted(self, date):
if not isinstance(date, DateValueInterpreted):
raise TypeError(str(type(date)))
return ("interpreted", date.date, date.phrase)
def visitPhrase(self, date):
if not isinstance(date, DateValuePhrase):
raise TypeError(str(type(date)))
return ("phrase", date.phrase)
class TestDetailDate(unittest.TestCase):
"""Tests for `ged4py.date` module."""
def test_001_cal_date(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(date.year, 2017)
self.assertIsNone(date.dual_year)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "2017")
self.assertEqual(date.month, "OCT")
self.assertEqual(date.month_num, 10)
self.assertEqual(date.day, 9)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = GregorianDate(2017, "OCT", bc=True)
self.assertEqual(date.year, 2017)
self.assertIsNone(date.dual_year)
self.assertTrue(date.bc)
self.assertEqual(date.year_str, "2017 B.C.")
self.assertEqual(date.month, "OCT")
self.assertEqual(date.month_num, 10)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = GregorianDate(1699, "FEB", dual_year=1700)
self.assertEqual(date.year, 1699)
self.assertEqual(date.dual_year, 1700)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "1699/00")
self.assertEqual(date.month, "FEB")
self.assertEqual(date.month_num, 2)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = HebrewDate(5000)
self.assertEqual(date.year, 5000)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "5000")
self.assertIsNone(date.month)
self.assertIsNone(date.month_num)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.HEBREW)
date = FrenchDate(1, "FRUC", 1)
self.assertEqual(date.year, 1)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "1")
self.assertEqual(date.month, "FRUC")
self.assertEqual(date.month_num, 12)
self.assertEqual(date.day, 1)
self.assertEqual(date.calendar, CalendarType.FRENCH_R)
date = JulianDate(5, "JAN", bc=True)
self.assertEqual(date.year, 5)
self.assertTrue(date.bc)
self.assertEqual(date.year_str, "5 B.C.")
self.assertEqual(date.month, "JAN")
self.assertEqual(date.month_num, 1)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.JULIAN)
def test_002_cal_date_key(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(date.key(), (2458035.5, 0))
date = GregorianDate(1699, "FEB", 1, dual_year=1700)
self.assertEqual(date.key(), (2342003.5, 0))
date = FrenchDate(2017, "VENT", bc=True)
self.assertEqual(date.key(), (1638959.5, 1))
date = HebrewDate(2017, "TSH", 22)
self.assertEqual(date.key(), (1084542.5, 0))
date = JulianDate(1000)
self.assertEqual(date.key(), (2086672.5, 1))
def test_003_cal_date_cmp(self):
"""Test date.CalendarDate class."""
self.assertTrue(GregorianDate(2016, "JAN", 1) < GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "FEB", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "JAN", 2))
self.assertTrue(GregorianDate(2017, "JAN", 1) <= GregorianDate(2017, "JAN", 2))
self.assertTrue(GregorianDate(2017, "JAN", 2) > GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 2) >= GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) == GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) != GregorianDate(2017, "JAN", 2))
# missing day compares as "past" the last day of month, but before next month
self.assertTrue(GregorianDate(2017, "JAN") > GregorianDate(2017, "JAN", 31))
self.assertTrue(GregorianDate(2017, "JAN") < GregorianDate(2017, "FEB", 1))
# missing month compares as "past" the last day of year, but before next year
self.assertTrue(GregorianDate(2017) > GregorianDate(2017, "DEC", 31))
self.assertTrue(GregorianDate(2017) < GregorianDate(2018, "JAN", 1))
# dual date
self.assertTrue(GregorianDate(1700, "JAN", 1) == GregorianDate(1699, "JAN", 1, dual_year=1700))
# compare Gregorian and Julian dates
self.assertTrue(GregorianDate(1582, "OCT", 15) == JulianDate(1582, "OCT", 5))
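# Julian 5 OCT 1582 and Gregorian 15 OCT 1582 are the same day: the Gregorian
# reform skipped ten calendar days when it was introduced.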
self.assertTrue(GregorianDate(1582, "OCT", 16) > JulianDate(1582, "OCT", 5))
self.assertTrue(JulianDate(1582, "OCT", 6) > GregorianDate(1582, "OCT", 15))
self.assertTrue(GregorianDate(2000, "JAN", 14) == JulianDate(2000, "JAN", 1))
# compare Gregorian and French dates
self.assertTrue(GregorianDate(1792, "SEP", 22) == FrenchDate(1, "VEND", 1))
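# 1 VEND 1 (1 Vendémiaire, year I) is the epoch of the French Republican
# calendar and corresponds to 22 September 1792 in the Gregorian calendar.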
self.assertTrue(GregorianDate(1792, "SEP", 23) > FrenchDate(1, "VEND", 1))
self.assertTrue(FrenchDate(1, "VEND", 2) > GregorianDate(1792, "SEP", 22))
self.assertTrue(GregorianDate(2020, "SEP", 21) == FrenchDate(228, "COMP", 5))
# compare Gregorian and Hebrew dates
self.assertTrue(GregorianDate(2020, "JAN", 1) == HebrewDate(5780, "SVN", 4))
def test_004_cal_date_str(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(str(date), "9 OCT 2017")
date = GregorianDate(2017, "OCT", bc=True)
self.assertEqual(str(date), "OCT 2017 B.C.")
date = GregorianDate(1699, "JAN", 1, dual_year=1700)
self.assertEqual(str(date), "1 JAN 1699/00")
date = HebrewDate(5000)
self.assertEqual(str(date), "@#DHEBREW@ 5000")
date = FrenchDate(1, "VEND", 1)
self.assertEqual(str(date), "@#DFRENCH R@ 1 VEND 1")
date = JulianDate(1582, "OCT", 5)
self.assertEqual(str(date), "@#DJULIAN@ 5 OCT 1582")
def test_005_cal_date_parse(self):
"""Test date.CalendarDate.parse method."""
date = CalendarDate.parse("31 MAY 2020")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 2020)
self.assertIsNone(date.dual_year)
self.assertFalse(date.bc)
self.assertEqual(date.month, "MAY")
self.assertEqual(date.month_num, 5)
self.assertEqual(date.day, 31)
self.assertEqual(date.original, "31 MAY 2020")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("@#DGREGORIAN@ 10 MAR 1698/99")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 1698)
self.assertEqual(date.dual_year, 1699)
self.assertFalse(date.bc)
self.assertEqual(date.month, "MAR")
self.assertEqual(date.month_num, 3)
self.assertEqual(date.day, 10)
self.assertEqual(date.original, "@#DGREGORIAN@ 10 MAR 1698/99")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("10 MAR 1699/00")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 1699)
self.assertEqual(date.dual_year, 1700)
self.assertEqual(date.original, "10 MAR 1699/00")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("@#DJULIAN@ 100 B.C.")
self.assertIsInstance(date, JulianDate)
self.assertEqual(date.year, 100)
self.assertTrue(date.bc)
self.assertIsNone(date.month)
self.assertIsNone(date.month_num)
self.assertIsNone(date.day)
self.assertEqual(date.original, "@#DJULIAN@ 100 B.C.")
self.assertEqual(date.calendar, CalendarType.JULIAN)
date = CalendarDate.parse("@#DFRENCH R@ 15 GERM 0001")
self.assertIsInstance(date, FrenchDate)
self.assertEqual(date.year, 1)
self.assertFalse(date.bc)
self.assertEqual(date.month, "GERM")
self.assertEqual(date.month_num, 7)
self.assertEqual(date.day, 15)
self.assertEqual(date.original, "@#DFRENCH R@ 15 GERM 0001")
self.assertEqual(date.calendar, CalendarType.FRENCH_R)
date = CalendarDate.parse("@#DHEBREW@ 7 NSN 5000")
self.assertIsInstance(date, HebrewDate)
self.assertEqual(date.year, 5000)
self.assertFalse(date.bc)
self.assertEqual(date.month, "NSN")
self.assertEqual(date.month_num, 8)
self.assertEqual(date.day, 7)
self.assertEqual(date.original, "@#DHEBREW@ 7 NSN 5000")
self.assertEqual(date.calendar, CalendarType.HEBREW)
# cannot handle ROMAN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DROMAN@ 2020")
# cannot handle UNKNOWN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DUNKNOWN@ 2020")
# dual year only works for GREGORIAN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DJULIAN@ 2020/21")
# cannot parse nonsense
with self.assertRaises(ValueError):
date = CalendarDate.parse("start of time")
def test_006_cal_date_visitor(self):
"""Test date.CalendarDate.accept method."""
visitor = TestDateVisitor()
date = GregorianDate(2017, "OCT", 9)
value = date.accept(visitor)
self.assertEqual(value, ("gregorian", date))
date = HebrewDate(5000)
value = date.accept(visitor)
self.assertEqual(value, ("hebrew", date))
date = FrenchDate(1, "VEND", 1)
value = date.accept(visitor)
self.assertEqual(value, ("french", date))
date = JulianDate(1582, "OCT", 5)
value = date.accept(visitor)
self.assertEqual(value, ("julian", date))
def test_007_cal_date_hash(self):
"""Test date.CalendarDate hash."""
self.assertEqual(hash(GregorianDate(2017, "OCT", 9)),
hash(GregorianDate(2017, "OCT", 9)))
self.assertEqual(hash(GregorianDate(2017, "OCT", 9, bc=True)),
hash(GregorianDate(2017, "OCT", 9, bc=True)))
self.assertEqual(hash(FrenchDate(1, "VEND", 1)),
hash(FrenchDate(1, "VEND", 1)))
self.assertEqual(hash(FrenchDate(1)),
hash(FrenchDate(1)))
def test_010_date_no_date(self):
"""Test date.DateValue class."""
date = DateValue.parse("not a date")
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertEqual(date.phrase, "not a date")
self.assertEqual(str(date), "(not a date)")
def test_012_date_parse_period(self):
"""Test date.DateValue class."""
date = DateValue.parse("FROM 1967")
self.assertIsInstance(date, DateValueFrom)
self.assertEqual(date.kind, DateValueTypes.FROM)
self.assertEqual(date.date, GregorianDate(1967))
self.assertEqual(str(date), "FROM 1967")
date = DateValue.parse("TO 1 JAN 2017")
self.assertIsInstance(date, DateValueTo)
self.assertEqual(date.kind, DateValueTypes.TO)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "TO 1 JAN 2017")
date = DateValue.parse("FROM 1920 TO 2000")
self.assertIsInstance(date, DateValuePeriod)
self.assertEqual(date.kind, DateValueTypes.PERIOD)
self.assertEqual(date.date1, GregorianDate(1920))
self.assertEqual(date.date2, GregorianDate(2000))
self.assertEqual(str(date), "FROM 1920 TO 2000")
date = DateValue.parse("from mar 1920 to 1 apr 2000")
self.assertIsInstance(date, DateValuePeriod)
self.assertEqual(date.kind, DateValueTypes.PERIOD)
self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
self.assertEqual(date.date2, GregorianDate(2000, "APR", 1))
self.assertEqual(str(date), "FROM MAR 1920 TO 1 APR 2000")
def test_013_date_parse_range(self):
"""Test date.DateValue class."""
date = DateValue.parse("BEF 1967B.C.")
self.assertIsInstance(date, DateValueBefore)
self.assertEqual(date.kind, DateValueTypes.BEFORE)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(str(date), "BEFORE 1967 B.C.")
date = DateValue.parse("AFT 1 JAN 2017")
self.assertIsInstance(date, DateValueAfter)
self.assertEqual(date.kind, DateValueTypes.AFTER)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "AFTER 1 JAN 2017")
date = DateValue.parse("BET @#DJULIAN@ 1600 AND 2000")
self.assertIsInstance(date, DateValueRange)
self.assertEqual(date.kind, DateValueTypes.RANGE)
self.assertEqual(date.date1, JulianDate(1600))
self.assertEqual(date.date2, GregorianDate(2000))
self.assertEqual(str(date), "BETWEEN @#DJULIAN@ 1600 AND 2000")
date = DateValue.parse("bet mar 1920 and apr 2000")
self.assertIsInstance(date, DateValueRange)
self.assertEqual(date.kind, DateValueTypes.RANGE)
self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
self.assertEqual(date.date2, GregorianDate(2000, "APR"))
self.assertEqual(str(date), "BETWEEN MAR 1920 AND APR 2000")
def test_014_date_parse_approx(self):
"""Test date.DateValue class."""
dates = {"500 B.C.": GregorianDate(500, bc=True),
"JAN 2017": GregorianDate(2017, "JAN"),
"31 JAN 2017": GregorianDate(2017, "JAN", 31)}
approx = [
("ABT", "ABOUT", DateValueAbout, DateValueTypes.ABOUT),
("CAL", "CALCULATED", DateValueCalculated, DateValueTypes.CALCULATED),
("EST", "ESTIMATED", DateValueEstimated, DateValueTypes.ESTIMATED)
]
for appr, fmt, klass, typeEnum in approx:
for datestr, value in dates.items():
date = DateValue.parse(appr + " " + datestr)
self.assertIsInstance(date, klass)
self.assertEqual(date.kind, typeEnum)
self.assertEqual(str(date), fmt + " " + datestr)
self.assertEqual(date.date, value)
def test_015_date_parse_phrase(self):
"""Test date.DateValue class."""
date = DateValue.parse("(some phrase)")
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertEqual(date.phrase, "some phrase")
date = DateValue.parse("INT 1967 B.C. (some phrase)")
self.assertIsInstance(date, DateValueInterpreted)
self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(date.phrase, "some phrase")
self.assertEqual(str(date), "INTERPRETED 1967 B.C. (some phrase)")
date = DateValue.parse("INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)")
self.assertIsInstance(date, DateValueInterpreted)
self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(date.phrase, "some phrase")
self.assertEqual(str(date), "INTERPRETED 1 JAN 2017 (some phrase)")
def test_016_date_parse_simple(self):
"""Test date.DateValue class."""
date = DateValue.parse("1967 B.C.")
self.assertIsInstance(date, DateValueSimple)
self.assertEqual(date.kind, DateValueTypes.SIMPLE)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(str(date), "1967 B.C.")
date = DateValue.parse("@#DGREGORIAN@ 1 JAN 2017")
self.assertIsInstance(date, DateValueSimple)
self.assertEqual(date.kind, DateValueTypes.SIMPLE)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "1 JAN 2017")
def test_017_date_cmp(self):
"""Test date.Date class."""
dv = DateValue.parse("2016")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016)))
dv = DateValue.parse("31 DEC 2000")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "DEC", 31)))
dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2001, "JAN", 1)))
# order of dates is reversed (end date precedes start date)
dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2000")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "JAN", 1)))
self.assertTrue(DateValue.parse("2016") < DateValue.parse("2017"))
self.assertTrue(DateValue.parse("2 JAN 2016") > DateValue.parse("1 JAN 2016"))
self.assertTrue(DateValue.parse("BET 1900 AND 2000") < DateValue.parse("FROM 1920 TO 1999"))
# comparing simple date with range
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BET 1 JAN 1999 AND 1 JAN 2000"))
self.assertNotEqual(DateValue.parse("1 JAN 2000"), DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BEF 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("TO 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("AFT 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("FROM 1 JAN 2000"))
# comparing ranges
self.assertEqual(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2001"),
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("FROM 1 JAN 1999 TO 1 JAN 2001") <
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2002") >
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
# Less specific date compares later than more specific
self.assertTrue(DateValue.parse("2000") > DateValue.parse("31 DEC 2000"))
self.assertTrue(DateValue.parse("DEC 2000") > DateValue.parse("31 DEC 2000"))
# phrase is always later than any regular date
self.assertTrue(DateValue.parse("(Could be 1996 or 1998)") > DateValue.parse("2000"))
# "empty" date is always later than any regular date
self.assertTrue(DateValue.parse("") > DateValue.parse("2000"))
def test_018_date_parse_empty(self):
"""Test date.DateValue class."""
for value in (None, ""):
date = DateValue.parse(value)
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertIsNone(date.phrase)
self.assertEqual(str(date), "")
def test_019_date_value_visitor(self):
"""Test date.DateValue class."""
visitor = TestDateVisitor()
date1 = GregorianDate(2017, "JAN", 1)
date2 = GregorianDate(2017, "DEC", 31)
value = DateValueSimple(date1).accept(visitor)
self.assertEqual(value, ("simple", date1))
value = DateValueFrom(date1).accept(visitor)
self.assertEqual(value, ("from", date1))
value = DateValueTo(date1).accept(visitor)
self.assertEqual(value, ("to", date1))
value = DateValuePeriod(date1, date2).accept(visitor)
self.assertEqual(value, ("period", date1, date2))
value = DateValueBefore(date1).accept(visitor)
self.assertEqual(value, ("before", date1))
value = DateValueAfter(date1).accept(visitor)
self.assertEqual(value, ("after", date1))
value = DateValueRange(date1, date2).accept(visitor)
self.assertEqual(value, ("range", date1, date2))
value = DateValueAbout(date1).accept(visitor)
self.assertEqual(value, ("about", date1))
value = DateValueCalculated(date1).accept(visitor)
self.assertEqual(value, ("calculated", date1))
value = DateValueEstimated(date1).accept(visitor)
self.assertEqual(value, ("estimated", date1))
value = DateValueInterpreted(date1, "phrase").accept(visitor)
self.assertEqual(value, ("interpreted", date1, "phrase"))
value = DateValuePhrase("phrase").accept(visitor)
self.assertEqual(value, ("phrase", "phrase"))
def test_020_date_hash(self):
"""Test date.Date hash"""
dv1 = DateValue.parse("2016")
dv2 = DateValue.parse("2016")
self.assertEqual(hash(dv1), hash(dv2))
dv1 = DateValue.parse("31 DEC 2000")
dv2 = DateValue.parse("31 DEC 2000")
self.assertEqual(hash(dv1), hash(dv2))
dv1 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
dv2 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
self.assertEqual(hash(dv1), hash(dv2))
DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21049, 21081), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""TO 1 JAN 2000"""'], {}), "('TO 1 JAN 2000')\n", (21064, 21081), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21107, 21136), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (21122, 21136), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21139, 21172), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""AFT 1 JAN 2000"""'], {}), "('AFT 1 JAN 2000')\n", (21154, 21172), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21198, 21227), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (21213, 21227), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21230, 21264), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 2000"""'], {}), "('FROM 1 JAN 2000')\n", (21245, 21264), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21468, 21516), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 1999 TO 1 JAN 2001"""'], {}), "('FROM 1 JAN 1999 TO 1 JAN 2001')\n", (21483, 21516), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21543, 21591), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (21558, 21591), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21617, 21665), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 2000 TO 1 JAN 2002"""'], {}), "('FROM 1 JAN 2000 TO 1 JAN 2002')\n", (21632, 21665), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21692, 
21740), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (21707, 21740), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21830, 21853), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (21845, 21853), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21856, 21886), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (21871, 21886), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21912, 21939), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""DEC 2000"""'], {}), "('DEC 2000')\n", (21927, 21939), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21942, 21972), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (21957, 21972), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22054, 22096), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""(Could be 1996 or 1998)"""'], {}), "('(Could be 1996 or 1998)')\n", (22069, 22096), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22099, 22122), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (22114, 22122), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22210, 22229), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['""""""'], {}), "('')\n", (22225, 22229), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22232, 22255), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (22247, 22255), False, 'from ged4py.date import DateValue, DateValueAbout, 
DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22856, 22878), 'ged4py.date.DateValueSimple', 'DateValueSimple', (['date1'], {}), '(date1)\n', (22871, 22878), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22963, 22983), 'ged4py.date.DateValueFrom', 'DateValueFrom', (['date1'], {}), '(date1)\n', (22976, 22983), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23066, 23084), 'ged4py.date.DateValueTo', 'DateValueTo', (['date1'], {}), '(date1)\n', (23077, 23084), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23165, 23194), 'ged4py.date.DateValuePeriod', 'DateValuePeriod', (['date1', 'date2'], {}), '(date1, date2)\n', (23180, 23194), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23286, 23308), 'ged4py.date.DateValueBefore', 'DateValueBefore', (['date1'], {}), '(date1)\n', (23301, 23308), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23393, 23414), 'ged4py.date.DateValueAfter', 'DateValueAfter', (['date1'], {}), '(date1)\n', (23407, 23414), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23498, 23526), 'ged4py.date.DateValueRange', 'DateValueRange', (['date1', 'date2'], {}), '(date1, date2)\n', (23512, 23526), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23617, 23638), 'ged4py.date.DateValueAbout', 'DateValueAbout', (['date1'], {}), '(date1)\n', (23631, 23638), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23722, 23748), 'ged4py.date.DateValueCalculated', 'DateValueCalculated', (['date1'], {}), 
'(date1)\n', (23741, 23748), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23837, 23862), 'ged4py.date.DateValueEstimated', 'DateValueEstimated', (['date1'], {}), '(date1)\n', (23855, 23862), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23950, 23987), 'ged4py.date.DateValueInterpreted', 'DateValueInterpreted', (['date1', '"""phrase"""'], {}), "(date1, 'phrase')\n", (23970, 23987), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24087, 24112), 'ged4py.date.DateValuePhrase', 'DateValuePhrase', (['"""phrase"""'], {}), "('phrase')\n", (24102, 24112), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n')] |
Dunkledore/quart | src/quart/local.py | 803c8678b083895f4ece35fccb6aca56e189ee0a | from __future__ import annotations
import asyncio
import copy
from contextvars import ContextVar # noqa # contextvars not understood as stdlib
from typing import Any # noqa # contextvars not understood as stdlib
from typing import Callable, Dict, Optional
class TaskLocal:
"""An object local to the current task."""
__slots__ = ("_storage",)
def __init__(self) -> None:
        # Note: as __setattr__ is overridden below, use the object __setattr__
object.__setattr__(self, "_storage", ContextVar("storage"))
def __getattr__(self, name: str) -> Any:
values = self._storage.get({})
try:
return values[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None:
values = self._storage.get({})
values[name] = value
self._storage.set(values)
def __delattr__(self, name: str) -> None:
values = self._storage.get({})
try:
del values[name]
self._storage.set(values)
except KeyError:
raise AttributeError(name)
@staticmethod
def _task_identity() -> int:
loop = asyncio.get_event_loop()
if loop.is_running():
task = asyncio.current_task()
task_id = id(task)
return task_id
else:
return 0
class LocalStack:
def __init__(self) -> None:
self._task_local = TaskLocal()
def push(self, value: Any) -> None:
stack = getattr(self._task_local, "stack", None)
if stack is None:
self._task_local.stack = stack = []
stack.append(value)
def pop(self) -> Any:
stack = getattr(self._task_local, "stack", None)
if stack is None or stack == []:
return None
else:
return stack.pop()
@property
def top(self) -> Any:
try:
return self._task_local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalProxy:
"""Proxy to a task local object."""
__slots__ = ("__dict__", "__local", "__wrapped__")
def __init__(self, local: Callable, name: Optional[str] = None) -> None:
        # Note: as __setattr__ is overridden below, use the object __setattr__
object.__setattr__(self, "__LocalProxy_local", local)
object.__setattr__(self, "__wrapped__", local)
object.__setattr__(self, "__name__", name)
def _get_current_object(self) -> Any:
return object.__getattribute__(self, "__LocalProxy_local")()
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError("__dict__")
def __repr__(self) -> str:
try:
obj = self._get_current_object()
except RuntimeError:
return "<%s unbound>" % self.__class__.__name__
return repr(obj)
def __bool__(self) -> bool:
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __dir__(self) -> Any:
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name: Any) -> Any:
if name == "__members__":
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key: Any, value: Any) -> Any:
self._get_current_object()[key] = value
def __delitem__(self, key: Any) -> Any:
del self._get_current_object()[key]
async def __aiter__(self) -> Any:
async for x in self._get_current_object():
yield x
__setattr__ = lambda x, n, v: setattr( # noqa: E731, E501
x._get_current_object(), n, v # type: ignore
)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n) # type: ignore # noqa: E731
__str__ = lambda x: str(x._get_current_object()) # type: ignore # noqa: E731
__lt__ = lambda x, o: x._get_current_object() < o # noqa: E731
__le__ = lambda x, o: x._get_current_object() <= o # noqa: E731
__eq__ = lambda x, o: x._get_current_object() == o # type: ignore # noqa: E731
__ne__ = lambda x, o: x._get_current_object() != o # type: ignore # noqa: E731
__gt__ = lambda x, o: x._get_current_object() > o # noqa: E731
__ge__ = lambda x, o: x._get_current_object() >= o # noqa: E731
__hash__ = lambda x: hash(x._get_current_object()) # type: ignore # noqa: E731
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731
__len__ = lambda x: len(x._get_current_object()) # noqa: E731
__getitem__ = lambda x, i: x._get_current_object()[i] # noqa: E731
__iter__ = lambda x: iter(x._get_current_object()) # noqa: E731
__contains__ = lambda x, i: i in x._get_current_object() # noqa: E731
__add__ = lambda x, o: x._get_current_object() + o # noqa: E731
__sub__ = lambda x, o: x._get_current_object() - o # noqa: E731
__mul__ = lambda x, o: x._get_current_object() * o # noqa: E731
__floordiv__ = lambda x, o: x._get_current_object() // o # noqa: E731
__mod__ = lambda x, o: x._get_current_object() % o # noqa: E731
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731
__pow__ = lambda x, o: x._get_current_object() ** o # noqa: E731
__lshift__ = lambda x, o: x._get_current_object() << o # noqa: E731
__rshift__ = lambda x, o: x._get_current_object() >> o # noqa: E731
__and__ = lambda x, o: x._get_current_object() & o # noqa: E731
__xor__ = lambda x, o: x._get_current_object() ^ o # noqa: E731
__or__ = lambda x, o: x._get_current_object() | o # noqa: E731
__div__ = lambda x, o: x._get_current_object().__div__(o) # noqa: E731
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731
__neg__ = lambda x: -(x._get_current_object()) # noqa: E731
__pos__ = lambda x: +(x._get_current_object()) # noqa: E731
__abs__ = lambda x: abs(x._get_current_object()) # noqa: E731
__invert__ = lambda x: ~(x._get_current_object()) # noqa: E731
__complex__ = lambda x: complex(x._get_current_object()) # noqa: E731
__int__ = lambda x: int(x._get_current_object()) # noqa: E731
__float__ = lambda x: float(x._get_current_object()) # noqa: E731
__oct__ = lambda x: oct(x._get_current_object()) # noqa: E731
__hex__ = lambda x: hex(x._get_current_object()) # noqa: E731
__index__ = lambda x: x._get_current_object().__index__() # noqa: E731
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731
__enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731
__radd__ = lambda x, o: o + x._get_current_object() # noqa: E731
__rsub__ = lambda x, o: o - x._get_current_object() # noqa: E731
__rmul__ = lambda x, o: o * x._get_current_object() # noqa: E731
__rdiv__ = lambda x, o: o / x._get_current_object() # noqa: E731
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object() # noqa: E731
__rmod__ = lambda x, o: o % x._get_current_object() # noqa: E731
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731
__copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731
__await__ = lambda x: x._get_current_object().__await__() # noqa: E731
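
# ---------------------------------------------------------------------------
# A minimal, illustrative usage sketch (hypothetical names, not part of the
# original module): it shows how LocalStack and LocalProxy are meant to be
# combined - push a request-like object onto a task-local stack and access it
# transparently through a proxy.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _request_stack = LocalStack()
    current_request = LocalProxy(lambda: _request_stack.top, name="current_request")

    async def _demo() -> None:
        _request_stack.push({"path": "/health"})
        # item access is forwarded by the proxy to the task-local object
        print(current_request["path"])  # prints: /health
        _request_stack.pop()

    asyncio.run(_demo())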
| [((1186, 1210), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1208, 1210), False, 'import asyncio\n'), ((511, 532), 'contextvars.ContextVar', 'ContextVar', (['"""storage"""'], {}), "('storage')\n", (521, 532), False, 'from contextvars import ContextVar\n'), ((1260, 1282), 'asyncio.current_task', 'asyncio.current_task', ([], {}), '()\n', (1280, 1282), False, 'import asyncio\n')] |
searobbersduck/pytorch-3dunet | pytorch3dunet/unet3d/predictor.py | 5bb8ed2b6966b2cd06b1dc676b62d1ad98329305 | import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM
use `LazyPredictor` instead.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def predict(self):
out_channels = self.config['model'].get('out_channels')
if out_channels is None:
out_channels = self.config['model']['dt_out_channels']
prediction_channel = self.config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} batches...')
        # dimensionality of the output predictions
volume_shape = self._volume_shape(self.loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')
# create destination H5 file
h5_output_file = h5py.File(self.output_file, 'w')
# allocate prediction and normalization arrays
logger.info('Allocating prediction and normalization arrays...')
prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
output_heads, h5_output_file)
# Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
self.model.eval()
# Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# send batch to device
batch = batch.to(device)
# forward pass
predictions = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
# for each output head
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# convert to numpy array
prediction = prediction.cpu().numpy()
# for each batch sample
for pred, index in zip(prediction, indices):
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + index
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
pred = np.expand_dims(pred[prediction_channel], axis=0)
logger.info(f'Saving predictions for slice:{index}...')
if avoid_block_artifacts:
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = unpad(pred, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
else:
# accumulate probabilities into the output prediction array
prediction_map[index] += pred
# count voxel visits for normalization
normalization_mask[index] += 1
# save results to
self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
# close the output H5 file
h5_output_file.close()
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# initialize the output prediction arrays
prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
return prediction_maps, normalization_masks
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
# save probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
prediction_datasets):
prediction_map = prediction_map / normalization_mask
if dataset.mirror_padding:
pad_width = dataset.pad_width
logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
class LazyPredictor(StandardPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
    is slower than the `StandardPredictor`, it should only be used when the predicted volume does not fit into RAM.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# allocate datasets for probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
prediction_maps = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
compression='gzip')
for dataset_name in prediction_datasets]
# allocate datasets for normalization masks
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
normalization_masks = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
compression='gzip')
for dataset_name in normalization_datasets]
return prediction_maps, normalization_masks
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
if dataset.mirror_padding:
logger.warn(
f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
# normalize the prediction_maps inside the H5
for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
normalization_masks,
prediction_datasets,
normalization_datasets):
# split the volume into 4 parts and load each into the memory separately
logger.info(f'Normalizing {prediction_dataset}...')
z, y, x = prediction_map.shape[1:]
# take slices which are 1/27 of the original volume
patch_shape = (z // 3, y // 3, x // 3)
for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
logger.info(f'Normalizing slice: {index}')
prediction_map[index] /= normalization_mask[index]
# make sure to reset the slice that has been visited already in order to avoid 'double' normalization
# when the patches overlap with each other
normalization_mask[index] = 1
logger.info(f'Deleting {normalization_dataset}...')
del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
"""
Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
self.iou_threshold = iou_threshold
self.noise_label = noise_label
self.clustering = clustering
assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
logger.info(f'IoU threshold: {iou_threshold}')
self.clustering_name = clustering
self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
        # dimensionality of the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")
def _embeddings_to_segmentation(self, embeddings):
"""
Cluster embeddings vectors with HDBSCAN and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
"""
Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels
merge the segmented patch (`segmentation`) into the `output_segmentation`
Args:
segmentation (ndarray): segmented patch
index (tuple): position of the patch inside `output_segmentation` volume
output_segmentation (ndarray): current state of the output segmentation
visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
voxels will be marked by a number greater than 0
"""
index = tuple(index)
# get new unassigned label
max_label = np.max(output_segmentation) + 1
# make sure there are no clashes between current segmentation patch and the output_segmentation
# but keep the noise label
noise_mask = segmentation == self.noise_label
segmentation += int(max_label)
segmentation[noise_mask] = self.noise_label
# get the overlap mask in the current patch
overlap_mask = visited_voxels_array[index] > 0
# get the new labels inside the overlap_mask
new_labels = np.unique(segmentation[overlap_mask])
merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
# relabel new segmentation with the merged labels
for current_label, new_label in merged_labels:
segmentation[segmentation == new_label] = current_label
# update the output_segmentation
output_segmentation[index] = segmentation
# visit the patch
visited_voxels_array[index] += 1
def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
def _most_frequent_label(labels):
unique, counts = np.unique(labels, return_counts=True)
ind = np.argmax(counts)
return unique[ind]
result = []
# iterate over new_labels and merge regions if the IoU exceeds a given threshold
for new_label in new_labels:
# skip 'noise' label assigned by hdbscan
if new_label == self.noise_label:
continue
new_label_mask = new_segmentation == new_label
# get only the most frequent overlapping label
most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
# skip 'noise' label
if most_frequent_label == self.noise_label:
continue
current_label_mask = current_segmentation == most_frequent_label
# compute Jaccard index
iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
current_label_mask).sum()
if iou > self.iou_threshold:
# merge labels
result.append((most_frequent_label, new_label))
return result
def _get_clustering(self, clustering_alg, kwargs):
logger.info(f'Using {clustering_alg} for clustering')
if clustering_alg == 'hdbscan':
min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)  # no trailing comma: a tuple here would break HDBSCAN
metric = kwargs.get('metric', 'euclidean')
cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')
logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
else:
bandwidth = kwargs['bandwidth']
logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
# use fast MeanShift with bin seeding
return MeanShift(bandwidth=bandwidth, bin_seeding=True)
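
# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (hypothetical shapes and values, not part
# of the original module) of the accumulate-and-normalize scheme used by
# StandardPredictor above: probabilities of overlapping patches are summed
# into `prediction_map` and later divided by the per-voxel visit count kept
# in `normalization_mask`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    prediction_map = np.zeros((1, 4, 4, 4), dtype='float32')
    normalization_mask = np.zeros((1, 4, 4, 4), dtype='uint8')
    # two fake overlapping patches along the depth axis (slices 0:3 and 1:4)
    for z_slice in (slice(0, 3), slice(1, 4)):
        index = (slice(0, 1), z_slice, slice(0, 4), slice(0, 4))
        prediction_map[index] += 0.8  # pretend the network predicted 0.8 everywhere
        normalization_mask[index] += 1
    # voxels covered by both patches were summed twice; dividing restores 0.8
    print((prediction_map / normalization_mask)[0, :, 0, 0])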
| [((269, 298), 'pytorch3dunet.unet3d.utils.get_logger', 'get_logger', (['"""UNet3DPredictor"""'], {}), "('UNet3DPredictor')\n", (279, 298), False, 'from pytorch3dunet.unet3d.utils import get_logger\n'), ((3293, 3325), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (3302, 3325), False, 'import h5py\n'), ((16480, 16491), 'time.time', 'time.time', ([], {}), '()\n', (16489, 16491), False, 'import time\n'), ((18073, 18110), 'numpy.unique', 'np.unique', (['segmentation[overlap_mask]'], {}), '(segmentation[overlap_mask])\n', (18082, 18110), True, 'import numpy as np\n'), ((3991, 4006), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4004, 4006), False, 'import torch\n'), ((6737, 6776), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""float32"""'}), "(output_shape, dtype='float32')\n", (6745, 6776), True, 'import numpy as np\n'), ((6939, 6976), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""uint8"""'}), "(output_shape, dtype='uint8')\n", (6947, 6976), True, 'import numpy as np\n'), ((11449, 11546), 'pytorch3dunet.datasets.hdf5.SliceBuilder._build_slices', 'SliceBuilder._build_slices', (['prediction_map'], {'patch_shape': 'patch_shape', 'stride_shape': 'patch_shape'}), '(prediction_map, patch_shape=patch_shape,\n stride_shape=patch_shape)\n', (11475, 11546), False, 'from pytorch3dunet.datasets.hdf5 import SliceBuilder\n'), ((13521, 13558), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""int32"""'}), "(volume_shape, dtype='int32')\n", (13529, 13558), True, 'import numpy as np\n'), ((13665, 13702), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""uint8"""'}), "(volume_shape, dtype='uint8')\n", (13673, 13702), True, 'import numpy as np\n'), ((13917, 13932), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13930, 13932), False, 'import torch\n'), ((15241, 15273), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (15250, 15273), False, 'import h5py\n'), ((17576, 17603), 'numpy.max', 'np.max', (['output_segmentation'], {}), '(output_segmentation)\n', (17582, 17603), True, 'import numpy as np\n'), ((18700, 18737), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (18709, 18737), True, 'import numpy as np\n'), ((18756, 18773), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (18765, 18773), True, 'import numpy as np\n'), ((20450, 20595), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_cluster_size': 'min_cluster_size', 'min_samples': 'min_samples', 'metric': 'metric', 'cluster_selection_method': 'cluster_selection_method'}), '(min_cluster_size=min_cluster_size, min_samples=min_samples,\n metric=metric, cluster_selection_method=cluster_selection_method)\n', (20465, 20595), False, 'import hdbscan\n'), ((20842, 20890), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (20851, 20890), False, 'from sklearn.cluster import MeanShift\n'), ((16666, 16682), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (16672, 16682), True, 'import numpy as np\n'), ((16696, 16707), 'time.time', 'time.time', ([], {}), '()\n', (16705, 16707), False, 'import time\n'), ((19532, 19582), 'numpy.bitwise_and', 'np.bitwise_and', (['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19546, 19582), True, 'import numpy as np\n'), ((19591, 19640), 'numpy.bitwise_or', 'np.bitwise_or', 
(['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19604, 19640), True, 'import numpy as np\n'), ((5415, 5463), 'numpy.expand_dims', 'np.expand_dims', (['pred[prediction_channel]'], {'axis': '(0)'}), '(pred[prediction_channel], axis=0)\n', (5429, 5463), True, 'import numpy as np\n'), ((5749, 5781), 'pytorch3dunet.unet3d.utils.unpad', 'unpad', (['pred', 'index', 'volume_shape'], {}), '(pred, index, volume_shape)\n', (5754, 5781), False, 'from pytorch3dunet.unet3d.utils import unpad\n')] |
electronicvisions/spack | var/spack/repos/builtin/packages/visionary-dev-tools/package.py | d6121eb35b4948f7d8aef7ec7a305a5123a7439e | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path as osp
class VisionaryDevTools(Package):
"""Developer convenience packages common to all visionary
development meta packages. Application specific build tools belong
to the dedicated meta packages."""
homepage = ''
# some random tarball, to make `spack fetch --dependencies visionary-defaults` work
url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz'
# This is only a dummy tarball (see difference between version numbers)
# TODO: as soon as a MetaPackage-concept has been merged, please update this package
version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz')
depends_on('ack')
depends_on('autoconf')
depends_on('automake')
depends_on('bash-completion')
depends_on('bazel')
depends_on('bear')
depends_on('cairo +X')
depends_on('cloc')
depends_on('cmake')
depends_on('connect-proxy')
depends_on('cppcheck +htmlreport')
depends_on('cquery')
depends_on('doxygen+graphviz')
depends_on('emacs ~X')
depends_on('gdb')
depends_on('genpybind')
depends_on('git+tcltk')
depends_on('git-fat-git')
depends_on('gtkplus')
depends_on('imagemagick')
depends_on('jq')
depends_on('libpcap')
depends_on('libtool')
depends_on('llvm+visionary+python~libcxx build_type=Release')
depends_on('mercurial')
depends_on('mosh')
depends_on('munge')
depends_on('ncdu')
depends_on('node-js')
depends_on('octave+fftw')
depends_on('openssh')
depends_on('pigz')
depends_on('pkg-config')
depends_on('py-autopep8')
depends_on('py-black', when="^[email protected]:")
depends_on('py-configargparse')
depends_on('py-doxypypy')
depends_on('py-flake8')
depends_on('py-gdbgui')
depends_on('py-git-review')
depends_on('py-ipython')
depends_on('py-jedi')
depends_on('py-junit-xml')
depends_on('py-language-server')
depends_on('py-line-profiler')
depends_on('py-nose')
depends_on('py-nose2')
depends_on('py-memory-profiler')
depends_on('py-pudb')
depends_on('py-pylint@:1.999.999', when="^python@:2.999.999")
depends_on('py-pylint', when="^[email protected]:")
depends_on('py-pyserial')
depends_on('py-pytest')
depends_on('py-pytest-xdist')
depends_on('py-ranger-fm')
depends_on('py-sqlalchemy')
depends_on('py-virtualenv')
depends_on('py-xmlrunner')
depends_on('py-yq')
depends_on('rtags')
depends_on('tar')
depends_on('texinfo')
# ECM (2020-05-14): removed 'the-silver-searcher' due to build fail on [email protected]
depends_on('tig')
depends_on('time')
depends_on('tmux')
depends_on('units')
depends_on('valgrind')
depends_on('verilator')
depends_on('vim +python +ruby +perl +cscope +huge +x')
depends_on('visionary-xilinx')
depends_on('wget')
depends_on('yaml-cpp+shared')
depends_on('zsh')
def install(self, spec, prefix):
mkdirp(prefix.etc)
# store a copy of this package.
filename = osp.basename(osp.dirname(__file__)) # gives name of parent folder
install(__file__, join_path(prefix.etc, filename + '.py'))
# we could create some filesystem view here?
| [((3313, 3334), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (3324, 3334), True, 'import os.path as osp\n')] |
MartaLoBalastegui/XICRA | extra/convertBAMtoPILFER.py | dc46e66ccf10937be6f2f8369ef02eb52e139eff | #!/usr/bin/env python3
## useful imports
import time
import io
import os
import re
import sys
from sys import argv
import subprocess
## ARGV
if len(sys.argv) < 6:  # the script name plus the 5 required arguments
print ("\nUsage:")
print ("python3 %s bam_file folder bedtools_bin samtools_bin logfile\n" %os.path.realpath(__file__))
exit()
bam_file = os.path.abspath(argv[1])
folder = argv[2]
bedtools_exe = argv[3]
samtools_exe = argv[4]
logFile = argv[5]
# start
output_file = open(logFile, 'a')
output_file.write("\nConvert BAM to Pilfer Input file:\n")
## Variables
dirname_name = os.path.dirname(bam_file)
split_name = os.path.splitext( os.path.basename(bam_file) )
bed_file = folder + '/' + split_name[0] + '.bed'
sam_file = folder + '/' + split_name[0] + '.sam'
pilfer_tmp = folder + '/' + split_name[0] + '.tmp.pilfer.bed'
pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed'
## START
print ("\n+ Converting BAM file into PILFER input file")
## generate bed file with bedtools bamtobed -i bam_file
if (os.path.isfile(bed_file)):
print ("\t+ File %s already exists" %bed_file)
else:
cmd_bedtools = "%s bamtobed -i %s > %s" %(bedtools_exe, bam_file, bed_file)
output_file.write(cmd_bedtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_bedtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_bedtools)
print('bedtools command generated an exception: %s' %exc)
exit()
## generate SAM file with samtools view
if (os.path.isfile(sam_file)):
print ("\t+ File %s already exists" %sam_file)
else:
cmd_samtools = "%s view %s > %s" %(samtools_exe, bam_file, sam_file)
output_file.write(cmd_samtools)
output_file.write("\n")
try:
subprocess.check_output(cmd_samtools, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_samtools)
print('samtools view command generated an exception: %s' %exc)
exit()
## generate paste filter tmp file
if (os.path.isfile(pilfer_tmp)):
print ("\t+ File %s already exists" %pilfer_tmp)
else:
## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v "OFS=\t" '{print $1, $2, $3, $16, $6}'
cmd_paste = "paste %s %s | awk -v \"OFS=\t\" \'{print $1, $2, $3, $16, $6}\' > %s" %(bed_file, sam_file, pilfer_tmp)
output_file.write(cmd_paste)
output_file.write("\n")
try:
subprocess.check_output(cmd_paste, shell = True)
except Exception as exc:
print ('***ERROR:')
print (cmd_paste)
print('paste bed sam command generated an exception: %s' %exc)
exit()
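## At this point the tmp pilfer file holds one row per alignment with the columns:
## chrom, start, end, read sequence (SAM SEQ field) and strand.
## The block below collapses identical consecutive rows into the final PILFER format:
## chrom, start, end, sequence::PI, read count, strand.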
## parse pilfer tmp file
counter = 1
previous_line = ()
# the log file handle opened above is still open; close it before reusing the name
output_file.close()
# Open file OUT
output_file = open(pilfer_file, 'w')
# Open file IN
fileHandler = open (pilfer_tmp, "r")
while True:
	# Get next line from file
	line = fileHandler.readline().strip()
	# If line is empty then end of file reached
	if not line:
		break
	seq = line.split('\t')[3]
	real_seq = seq.split('::PU')
	seq_len = len(str(real_seq[0]))  # read length (currently not used for filtering)
	## Collapse identical consecutive entries, counting the supporting reads
	if not previous_line:
		# first record seen: start counting from here
		previous_line = line
	elif previous_line == line:
		# same record as the previous one: just increase the read count
		counter += 1
	else:
		# a new record starts: write the previous one together with its read count
		line_split = previous_line.split('\t')
		output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
		previous_line = line
		counter = 1

## write the last accumulated record, then close and finish
if previous_line:
	line_split = previous_line.split('\t')
	output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4]))
fileHandler.close()
output_file.close()
| [((305, 329), 'os.path.abspath', 'os.path.abspath', (['argv[1]'], {}), '(argv[1])\n', (320, 329), False, 'import os\n'), ((542, 567), 'os.path.dirname', 'os.path.dirname', (['bam_file'], {}), '(bam_file)\n', (557, 567), False, 'import os\n'), ((976, 1000), 'os.path.isfile', 'os.path.isfile', (['bed_file'], {}), '(bed_file)\n', (990, 1000), False, 'import os\n'), ((1422, 1446), 'os.path.isfile', 'os.path.isfile', (['sam_file'], {}), '(sam_file)\n', (1436, 1446), False, 'import os\n'), ((1877, 1903), 'os.path.isfile', 'os.path.isfile', (['pilfer_tmp'], {}), '(pilfer_tmp)\n', (1891, 1903), False, 'import os\n'), ((599, 625), 'os.path.basename', 'os.path.basename', (['bam_file'], {}), '(bam_file)\n', (615, 625), False, 'import os\n'), ((1202, 1251), 'subprocess.check_output', 'subprocess.check_output', (['cmd_bedtools'], {'shell': '(True)'}), '(cmd_bedtools, shell=True)\n', (1225, 1251), False, 'import subprocess\n'), ((1641, 1690), 'subprocess.check_output', 'subprocess.check_output', (['cmd_samtools'], {'shell': '(True)'}), '(cmd_samtools, shell=True)\n', (1664, 1690), False, 'import subprocess\n'), ((2264, 2310), 'subprocess.check_output', 'subprocess.check_output', (['cmd_paste'], {'shell': '(True)'}), '(cmd_paste, shell=True)\n', (2287, 2310), False, 'import subprocess\n'), ((257, 283), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (273, 283), False, 'import os\n')] |
nikhilsamninan/python-files | day7/main5list.py | 15198459081097058a939b40b5e8ef754e578fe0 | a="Betty Bought a butter the butter was bitter so betty bought a better butter which was not bitter"
# keep the last letter of every word that has an even number of characters
v = [word[-1] for word in a.split() if len(word) % 2 == 0]
print(v) | [] |
lcarnevale/proxy-mqtt2influx | app/reader.py | 89b3cd354b465d7451556a2d2ec49ac8688b4f17 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""Reader class based on InfluxDB
This implementation does its best to follow Robert Martin's Clean Code guidelines.
The comments follow the Google Python Style Guide:
https://github.com/google/styleguide/blob/gh-pages/pyguide.md
"""
__copyright__ = 'Copyright 2021, FCRlab at University of Messina'
__author__ = 'Lorenzo Carnevale <[email protected]>'
__credits__ = ''
__description__ = 'Reader class based on InfluxDB'
import time
import logging
import threading
import persistqueue
from datetime import datetime
from influxdb_client.client.write_api import SYNCHRONOUS
from influxdb_client import InfluxDBClient, Point, WritePrecision
class Reader:
def __init__(self, host, port, token, organization, bucket, mutex, verbosity):
self.__url = "http://%s:%s" % (host, port)
self.__token = token
self.__organization = organization
self.__bucket = bucket
self.__mutex = mutex
self.__reader = None
self.__setup_logging(verbosity)
def __setup_logging(self, verbosity):
format = "%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s"
filename='log/mqtt2influx.log'
datefmt = "%d/%m/%Y %H:%M:%S"
level = logging.INFO
if (verbosity):
level = logging.DEBUG
logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt)
def setup(self):
self.__reader = threading.Thread(
target = self.__reader_job,
args = (self.__url, self.__token, self.__organization, self.__bucket)
)
def __reader_job(self, url, token, organization, bucket):
self.__mutex.acquire()
q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True)
self.__mutex.release()
client = InfluxDBClient(url=url, token=token)
write_api = client.write_api(write_options=SYNCHRONOUS)
try:
while (True):
raw_data = q.get()
logging.debug("Just got new data")
logging.debug("Parsing data points")
data = [
{
"measurement": raw_data['measurement'],
"tags": raw_data['tags'],
"fields": raw_data['fields'],
"time": raw_data['time']
}
]
write_api.write(bucket, organization, data)
logging.info("Data into InfluxDB")
time.sleep(0.3)
except KeyboardInterrupt:
pass
def start(self):
self.__reader.start() | [((1349, 1451), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'filename', 'filemode': '"""a"""', 'format': 'format', 'level': 'level', 'datefmt': 'datefmt'}), "(filename=filename, filemode='a', format=format, level=\n level, datefmt=datefmt)\n", (1368, 1451), False, 'import logging\n'), ((1494, 1609), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__reader_job', 'args': '(self.__url, self.__token, self.__organization, self.__bucket)'}), '(target=self.__reader_job, args=(self.__url, self.__token,\n self.__organization, self.__bucket))\n', (1510, 1609), False, 'import threading\n'), ((1755, 1826), 'persistqueue.SQLiteQueue', 'persistqueue.SQLiteQueue', (['"""data"""'], {'multithreading': '(True)', 'auto_commit': '(True)'}), "('data', multithreading=True, auto_commit=True)\n", (1779, 1826), False, 'import persistqueue\n'), ((1876, 1912), 'influxdb_client.InfluxDBClient', 'InfluxDBClient', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (1890, 1912), False, 'from influxdb_client import InfluxDBClient, Point, WritePrecision\n'), ((2076, 2110), 'logging.debug', 'logging.debug', (['"""Just got new data"""'], {}), "('Just got new data')\n", (2089, 2110), False, 'import logging\n'), ((2128, 2164), 'logging.debug', 'logging.debug', (['"""Parsing data points"""'], {}), "('Parsing data points')\n", (2141, 2164), False, 'import logging\n'), ((2548, 2582), 'logging.info', 'logging.info', (['"""Data into InfluxDB"""'], {}), "('Data into InfluxDB')\n", (2560, 2582), False, 'import logging\n'), ((2600, 2615), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2610, 2615), False, 'import time\n')] |
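A minimal usage sketch for the Reader class above: the endpoint, credentials, and bucket are hypothetical placeholders, the import path assumes the app/ layout shown in the row header, and a reachable InfluxDB instance plus the persist-queue 'data' directory and a log/ directory are assumed to exist.

import threading

from app.reader import Reader  # assumes the app/reader.py module layout above

mutex = threading.Lock()  # lock-like object; Reader only calls acquire()/release() on it
reader = Reader(
    host="localhost", port=8086,             # hypothetical InfluxDB endpoint
    token="my-token", organization="my-org",  # hypothetical credentials
    bucket="telemetry",                       # hypothetical bucket name
    mutex=mutex, verbosity=True,
)
reader.setup()   # builds the background reader thread
reader.start()   # starts draining the queue and writing points to InfluxDB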
DottaPaperella/TALight | example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py | 580322c3121c9acde9827f996fd4e39e31d93a6f | #!/usr/bin/env python3
from sys import stderr, exit, argv
from random import randrange
#from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
# METADATA OF THIS TAL_SERVICE:
problem="tiling_mxn-boards_with_1x2-boards"
service="is_tilable"
args_list = [
('m',int),
('n',int),
('my_conjecture',str),
('h',int),
('k',int),
('lang',str),
('ISATTY',bool),
]
ENV = Env(problem, service, args_list)
TAc = TALcolors(ENV)
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
assert ENV['h']==1
assert ENV['k']==2
print()
if (ENV['m'] * ENV['n']) % 2 == 1:
if ENV['my_conjecture'] == "yes":
TAc.NO()
print(LANG.render_feedback("FALSE-is-not-tilable", f"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you can submit a tiling of that grid to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.OK()
print(LANG.render_feedback("TRUE-is-not-tilable", f"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable."))
if (ENV['m'] * ENV['n']) % 2 == 0:
if ENV['my_conjecture'] == "yes":
TAc.OK()
print(LANG.render_feedback("TRUE-is-tilable", f"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us a tiling for this grid you can submit it to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.NO()
print(LANG.render_feedback("FALSE-is-tilable", f"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'."))
exit(0)
| [((413, 445), 'multilanguage.Env', 'Env', (['problem', 'service', 'args_list'], {}), '(problem, service, args_list)\n', (416, 445), False, 'from multilanguage import Env, Lang, TALcolors\n'), ((451, 465), 'multilanguage.TALcolors', 'TALcolors', (['ENV'], {}), '(ENV)\n', (460, 465), False, 'from multilanguage import Env, Lang, TALcolors\n'), ((1773, 1780), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (1777, 1780), False, 'from sys import stderr, exit, argv\n')] |
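The service above reduces the question to a parity check: an m x n board admits a perfect tiling by 1x2 dominoes exactly when m*n is even, i.e. when at least one side length is even. A standalone sketch of that predicate, independent of the TALight framework, is shown below.

def is_tilable_by_dominoes(m: int, n: int) -> bool:
    # Each domino covers two cells, so an odd-area board can never be tiled;
    # when the area is even, one side is even and rows of parallel dominoes suffice.
    return (m * n) % 2 == 0

assert is_tilable_by_dominoes(2, 3)      # 2x3 board: three vertical dominoes, one per column
assert not is_tilable_by_dominoes(3, 3)  # 3x3 board: odd area, no perfect tiling exists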