the-stack_106_13830
|
import json
from xml.sax.saxutils import quoteattr
from django.utils.safestring import mark_safe
from . import field
from . import container
class FilterListContainerMixin(object):
template_name = 'django_cradmin/uicontainer/filterlist/filterlist.django.html'
def initialize(self,
id_attribute='id',
class_name=None,
select_mode=None,
auto_load_first_page=True,
get_items_api_url=None,
components=None,
initially_selected_item_ids=None):
self._id_attribute = id_attribute
self._class_name = class_name
self._select_mode = select_mode
self._auto_load_first_page = auto_load_first_page
self._get_items_api_url = get_items_api_url
self._components = components or []
self._initially_selected_item_ids = initially_selected_item_ids or []
def get_config_dict(self):
return {
'idAttribute': self._id_attribute,
'className': self._class_name,
'selectMode': self._select_mode,
'autoLoadFirstPage': self._auto_load_first_page,
'getItemsApiUrl': self._get_items_api_url,
'components': self._components,
'initiallySelectedItemIds': self._initially_selected_item_ids
}
@property
def widget_config_json(self):
return mark_safe(quoteattr(json.dumps(self.get_config_dict())))
class FilterListField(FilterListContainerMixin, field.BaseFieldRenderable):
def __init__(self,
id_attribute='id',
class_name=None,
select_mode=None,
auto_load_first_page=True,
get_items_api_url=None,
components=None,
initially_selected_item_ids=None,
**kwargs):
self.initialize(id_attribute=id_attribute,
class_name=class_name,
select_mode=select_mode,
auto_load_first_page=auto_load_first_page,
get_items_api_url=get_items_api_url,
components=components,
initially_selected_item_ids=initially_selected_item_ids)
super(FilterListField, self).__init__(**kwargs)
def should_render_as_child_of_label(self):
return False
class FilterListContainer(FilterListContainerMixin, container.AbstractContainerRenderable):
def __init__(self,
id_attribute='id',
class_name=None,
select_mode=None,
auto_load_first_page=True,
get_items_api_url=None,
components=None,
initially_selected_item_ids=None,
**kwargs):
self.initialize(id_attribute=id_attribute,
class_name=class_name,
select_mode=select_mode,
auto_load_first_page=auto_load_first_page,
get_items_api_url=get_items_api_url,
components=components,
initially_selected_item_ids=initially_selected_item_ids)
super(FilterListContainer, self).__init__(**kwargs)
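# Illustrative sketch (not part of the original module): the helper below mirrors
# what ``widget_config_json`` renders into the template -- the config dict from
# ``get_config_dict()`` serialized with ``json.dumps`` and quoted via ``quoteattr``
# so it is safe to embed as an HTML attribute (``mark_safe`` is omitted here).
# The function name is hypothetical and exists only as documentation.
def _example_widget_config_attr(config_dict):
    """Return ``config_dict`` as a JSON string quoted for use in an HTML attribute."""
    return quoteattr(json.dumps(config_dict))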
|
the-stack_106_13832
|
from mimesis_stats.providers.multivariable import MultiVariable
def test_dependent_variables():
names = ["response", "count"]
combinations = [("Yes", 123), ("No", None)]
weights = [0, 1]
provider = MultiVariable()
expected_result = {"response": "No", "count": None}
result = provider.dependent_variables(names, options=combinations, weights=weights)
assert result == expected_result
expected_result = {"response": "Yes", "count": 123}
result = provider.dependent_variables(names, combinations, weights=list(reversed(weights)))
assert result == expected_result
|
the-stack_106_13833
|
# --------------------------------------------------------------------------- #
# Author: Ibrahim Odumas
# All Rights Reserved
# Open-source, free to copy
# --------------------------------------------------------------------------- #
import utils
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tabulate import tabulate
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# ---------- Plot Setting ---------- #
COLOR = sns.color_palette()
FIGSIZE = (10, 6)
FONTSIZE = 20
ALPHA_PLT = 1
# ---------------------------------- #
# ---------- Load Data ------------- #
_TRAIN = pd.read_csv(os.path.join(utils.ROOT_DIR, "quoradata", "train.csv"))
_TEST = pd.read_csv(os.path.join(utils.ROOT_DIR, "quoradata", "test.csv"))
columns = ["question1", "question2"]
TRAIN_X = _TRAIN[columns]
TRAIN_Y = _TRAIN.is_duplicate
TEST_X = _TEST[columns]
del _TRAIN, _TEST
# ---------------------------------- #
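# Note: generate_word_cloud() below relies on the module-level ``stopwords`` set,
# which is only defined inside the __main__ block before the function is called.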
def generate_word_cloud(text):
wc = WordCloud(
background_color="white",
max_words=20,
stopwords=stopwords,
contour_width=3,
contour_color="steelblue",
)
wc.generate(text)
plt.imshow(wc, interpolation="bilinear")
plt.title(text, fontsize=FONTSIZE)
plt.axis("off")
plt.show()
if __name__ == "__main__":
# ---------- STOPWORDS ------------- #
stopwords = set(STOPWORDS)
stopwords.add("said")
# ---------------------------------- #
# ------- Analyzing Train set ------ #
# 1. get the a table of 0 and 1 in the class label
train_cl_tb = TRAIN_Y.value_counts()
print(tabulate(pd.DataFrame(train_cl_tb), tablefmt="psql", headers="keys"))
plt.figure(figsize=FIGSIZE)
    sns.barplot(x=train_cl_tb.index, y=train_cl_tb.values, alpha=ALPHA_PLT, color=COLOR[3])
plt.ylabel("Frequency", fontsize=FONTSIZE)
plt.xlabel("Is Duplicate - (Response Variable)", fontsize=FONTSIZE)
plt.show()
# 2. sample size of Train vs. Test
plt.figure(figsize=FIGSIZE)
    sns.barplot(
        x=["Train", "Test"],
        y=[TRAIN_X.shape[0], TEST_X.shape[0]],
        alpha=ALPHA_PLT,
        color=COLOR[3],
    )
plt.ylabel("Frequency", fontsize=FONTSIZE)
plt.xlabel("Sample Size", fontsize=FONTSIZE)
plt.show()
# ------ END Analyzing training set ---- #
# ------- Generate Word Cloud ----------------------- #
text1 = TRAIN_X.question1[0] # Que1 with is_duplicate = 0
text2 = TRAIN_X.question2[0] # Que2 with is_duplicate = 0
text3 = TRAIN_X.question1[5] # Que1 with is_duplicate = 1
text4 = TRAIN_X.question2[5] # Que2 with is_duplicate = 1
generate_word_cloud(text1)
generate_word_cloud(text2)
generate_word_cloud(text3)
generate_word_cloud(text4)
# ------- END Generate Word Cloud ----------------------- #
|
the-stack_106_13835
|
import torch
from utils.loading_utils import load_model, get_device
import numpy as np
import argparse
import pandas as pd
from utils.event_readers import FixedSizeEventReader, FixedDurationEventReader
from utils.inference_utils import events_to_voxel_grid, events_to_voxel_grid_pytorch
from utils.timers import Timer
from image_reconstructor import ImageReconstructor
from options.inference_options import set_inference_options
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Evaluating a trained network')
parser.add_argument('-c', '--path_to_model', required=True, type=str,
help='path to model weights')
parser.add_argument('-i', '--input_file', required=True, type=str)
parser.add_argument('--fixed_duration', dest='fixed_duration', action='store_true')
parser.set_defaults(fixed_duration=False)
parser.add_argument('-N', '--window_size', default=None, type=int,
help="Size of each event window, in number of events. Ignored if --fixed_duration=True")
parser.add_argument('-T', '--window_duration', default=33.33, type=float,
help="Duration of each event window, in milliseconds. Ignored if --fixed_duration=False")
parser.add_argument('--num_events_per_pixel', default=0.35, type=float,
help='in case N (window size) is not specified, it will be \
automatically computed as N = width * height * num_events_per_pixel')
parser.add_argument('--skipevents', default=0, type=int)
parser.add_argument('--suboffset', default=0, type=int)
parser.add_argument('--compute_voxel_grid_on_cpu', dest='compute_voxel_grid_on_cpu', action='store_true')
parser.set_defaults(compute_voxel_grid_on_cpu=False)
set_inference_options(parser)
args = parser.parse_args()
# Read sensor size from the first first line of the event file
path_to_events = args.input_file
header = pd.read_csv(path_to_events, delim_whitespace=True, header=None, names=['width', 'height'],
                         dtype={'width': int, 'height': int},
nrows=1)
width, height = header.values[0]
print('Sensor size: {} x {}'.format(width, height))
# Load model
model = load_model(args.path_to_model)
device = get_device(args.use_gpu)
model = model.to(device)
model.eval()
reconstructor = ImageReconstructor(model, height, width, model.num_bins, args)
""" Read chunks of events using Pandas """
# Loop through the events and reconstruct images
N = args.window_size
if not args.fixed_duration:
if N is None:
N = int(width * height * args.num_events_per_pixel)
print('Will use {} events per tensor (automatically estimated with num_events_per_pixel={:0.2f}).'.format(
N, args.num_events_per_pixel))
else:
print('Will use {} events per tensor (user-specified)'.format(N))
mean_num_events_per_pixel = float(N) / float(width * height)
if mean_num_events_per_pixel < 0.1:
print('!!Warning!! the number of events used ({}) seems to be low compared to the sensor size. \
The reconstruction results might be suboptimal.'.format(N))
elif mean_num_events_per_pixel > 1.5:
print('!!Warning!! the number of events used ({}) seems to be high compared to the sensor size. \
The reconstruction results might be suboptimal.'.format(N))
initial_offset = args.skipevents
sub_offset = args.suboffset
start_index = initial_offset + sub_offset
if args.compute_voxel_grid_on_cpu:
print('Will compute voxel grid on CPU.')
if args.fixed_duration:
event_window_iterator = FixedDurationEventReader(path_to_events,
duration_ms=args.window_duration,
start_index=start_index)
else:
event_window_iterator = FixedSizeEventReader(path_to_events, num_events=N, start_index=start_index)
with Timer('Processing entire dataset'):
for event_window in event_window_iterator:
last_timestamp = event_window[-1, 0]
with Timer('Building event tensor'):
if args.compute_voxel_grid_on_cpu:
event_tensor = events_to_voxel_grid(event_window,
num_bins=model.num_bins,
width=width,
height=height)
event_tensor = torch.from_numpy(event_tensor)
else:
event_tensor = events_to_voxel_grid_pytorch(event_window,
num_bins=model.num_bins,
width=width,
height=height,
device=device)
num_events_in_window = event_window.shape[0]
reconstructor.update_reconstruction(event_tensor, start_index + num_events_in_window, last_timestamp)
start_index += num_events_in_window
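# Example invocation (a sketch; the script name and file paths are placeholders,
# only the flags defined by the argument parser above are real):
#   python <this_script>.py -c pretrained/model.pth -i events.txt --window_size 30000
#   python <this_script>.py -c pretrained/model.pth -i events.txt --fixed_duration -T 33.33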
|
the-stack_106_13836
|
"""
短信登陆telegram
下载设备数据,命令行
下载信息数据库,反馈为文本结构体
create by judy 2018/10/29
update by judy 2019/03/07
新增回调函数,删除读取后的文件
修改查询账号是否在线
update by judy 2020/05/18
即将新增边下边读功能,可能会新增很多函数,这里做了一些拆分
不然那就实在是太长了
"""
import datetime
import json
import re
import subprocess
import threading
import traceback
from pathlib import Path
from datacontract.ecommandstatus import ECommandStatus
from datacontract.idowndataset import Task, EBackResult
# contact information
from idownclient.clientdatafeedback import CONTACT, CONTACT_ONE, ICHATGROUP, ICHATGROUP_ONE, ICHATLOG, ICHATLOG_ONE, \
IdownLoginLog, IdownLoginLog_ONE, PROFILE, RESOURCES, EResourceType
from idownclient.spider.appcfg import AppCfg
from .telegrambase import TelegramBase
class SpiderTelegram(TelegramBase):
def __init__(self, tsk: Task, appcfg: AppCfg, clientid):
TelegramBase.__init__(self, tsk, appcfg, clientid)
def _sms_login(self):
if not self._environment:
return self._environment
# login_res = False
is_login = self._is_login()
if is_login:
login_res = True
else:
login_res = self.__sms_login()
return login_res
def __sms_login(self):
# login
        # the +86 country prefix is already included by default
args = self._common_args(self.task.phone)
p = self._run_telegram(
*args,
cmdtype='login',
taskid=self.task.batchid,
# stdout=fsout,
# stderr=fserr,
)
        # read the process output
process_login = threading.Thread(
target=self._login_process, args=(p,))
process_login.start()
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
try:
if self._sectimeout > 0:
process_login.join(self._sectimeout)
                if process_login.is_alive():
self._logger.error(f"timeout sec:{self._sectimeout}")
else:
process_login.join()
except Exception:
self._logger.error(traceback.format_exc())
if p is not None:
p.kill()
finally:
p.poll()
p.terminate()
process_flush.join()
process_err.join()
return self._result
def _online_check(self):
"""
        Check whether the telegram account is online.
        The telegram account must already be logged in.
:return:
"""
# if not self._environment:
# return self._environment
        # the phone number to query
args = ["-file {}".format(self.task.globaltelcode + self.task.phone)]
        # the account that is already logged in
res = [
f"-account {self.task.preglobaltelcode + self.task.preaccount}",
f"-target {self.accountsdir}",
f"-phone {'Honor9'}",
]
args.extend(res)
p = self._run_telegram(
*args,
cmdtype='find_online',
taskid=self.task.batchid,
# stdout=fsout,
# stderr=fserr,
)
        # read stderr
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
        # flush stdout
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
self._stopsigin = False
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#"):
self._logger.info(f"{msg}")
continue
retcode = msg
idx = msg.find(' ')
if idx < 0:
idx = msg.find('\t')
if idx > 0:
retcode = msg[:idx].strip().rstrip()
if retcode == '#@1000':
self._logger.info(f"result code: {msg}")
self._write_task_back(ECommandStatus.Failed, msg[6:])
break
elif retcode == '#@21':
self._logger.info(f"result code: {msg}")
self._write_task_back(ECommandStatus.Succeed, "目标账号在线", result=EBackResult.Online)
break
elif retcode == '#@22':
self._logger.info(f"result code: {msg}")
if '0' in msg:
                        # if 0 means online, why is it grouped under the offline branch?
self._write_task_back(ECommandStatus.Succeed, "目标账号在线", result=EBackResult.Online)
elif '-1' in msg:
self._write_task_back(ECommandStatus.Succeed, "目标账号已经很久没有登陆", result=EBackResult.Offline)
else:
self._write_task_back(ECommandStatus.Succeed, "目标账号离线", result=EBackResult.Offline)
break
elif retcode == '#@24':
self._logger.info(f"result code: {msg}")
self._write_task_back(ECommandStatus.Succeed, "账号没有注册", result=EBackResult.UnRegisterd)
break
else:
self._logger.info(f"result code: {msg}")
self._write_task_back(ECommandStatus.Failed, msg)
break
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
self._stopsigin = True
return
def _check_registration(self):
"""
        Not necessary: the online check does the same thing as a registration check,
        and telegram assumes that every phone number is registered by default.
:return:
"""
self._write_task_back(ECommandStatus.Succeed, description="telegram默认所有手机号都注册了", result=EBackResult.Registerd)
return
def _logout(self) -> bool:
logout_res = False
args = self._common_args(self.task.phone)
p = self._run_telegram(
*args,
cmdtype='logout',
taskid=self.task.batchid,
# stdout=fsout,
# stderr=fserr,
)
        # read stderr
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
        # flush stdout
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
self._stopsigin = False
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#"):
self._logger.info("{}".format(msg))
continue
retcode = msg
idx = msg.find(' ')
if idx < 0:
idx = msg.find('\t')
if idx > 0:
retcode = msg[:idx].strip().rstrip()
if retcode == '#@1000':
self._logger.info(f"result code: {msg}")
self._write_task_back(ECommandStatus.Failed, msg)
break
elif retcode == '#@51':
self._logger.info(f"result code: {msg}")
logout_res = True
break
elif retcode == '#@52':
self._logger.info(f"result code: {msg}")
break
else:
self._logger.info(f"result code: {msg}")
break
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
self._stopsigin = True
return logout_res
def _is_login(self):
"""
        Check whether telegram needs to log in again.
:return:
"""
is_login_status = False
p = None
try:
args = ["-file {}".format(self.task.globaltelcode + self.task.phone)]
args.extend(self._common_args(self.task.phone))
p = self._run_telegram(
*args,
cmdtype='find_online',
taskid=self.task.batchid,
# stdout=fsout,
# stderr=fserr,
)
            # read stderr
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
            # flush stdout
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
self._stopsigin = False
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#"):
self._logger.info(f"{msg}")
continue
# retcode = msg
# idx = msg.find(' ')
# if idx < 0:
# idx = msg.find('\t')
# if idx > 0:
# retcode = msg[:idx].strip().rstrip()
if '#@21' in msg or '#@22' in msg or '#@23' in msg or '#@24' in msg:
self._logger.info(f"result code: {msg}")
is_login_status = True
break
else:
self._logger.info(f"result code: {msg}")
break
except Exception:
self._logger.error(traceback.format_exc())
if p is not None:
p.kill()
finally:
if p is not None:
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
self._stopsigin = True
return is_login_status
def _login_process(self, p: subprocess.Popen):
if p is None or not isinstance(p, subprocess.Popen):
raise Exception("subprocess.Popen object is None while dealing the stdout stream")
self._stopsigin = False
try:
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#"):
self._logger.info(f"{msg}")
continue
retcode = msg
idx = msg.find(' ')
if idx < 0:
idx = msg.find('\t')
if idx > 0:
retcode = msg[:idx].strip().rstrip()
if retcode == '#@31':
self._logger.info(f"result code: {msg}")
try:
vercode = self._get_vercode()
except:
                            # what to do when the verification code times out
vercode = '00000'
p.stdin.write(f"{vercode}\n")
p.stdin.flush()
elif retcode == '#@36':
self._logger.info(f'result code: {msg}')
elif retcode == '#@32':
                        # login succeeded
self._logger.info(f"result code: {msg}")
self._result = True
break
else:
self._logger.info(f"result code: {msg}")
self._result = False
break
except Exception:
self._logger.error(traceback.format_exc())
self._result = False
if p is not None:
p.kill()
finally:
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
self._stopsigin = True
return
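        # Note: the status codes parsed above (and in the other methods of this
        # class) come from subprocess output lines of the form "#@<code> <message>";
        # a standalone sketch of that parsing is documented at the end of this file.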
def _download(self):
        # download the database first so that userid gets assigned
        # download the database: the users, chats and messages tables
try:
for dbdata in self._download_sqlitdb():
yield dbdata
except:
self._logger.error(f"Error downloading database data, err:{traceback.format_exc()}")
        # download the device list
try:
for clientlist in self.__get_loginlog():
yield clientlist
except:
self._logger.error(f"Error downloading device list, err:{traceback.format_exc()}")
def __get_loginlog(self):
args = self._common_args(self.task.phone)
p = self._run_telegram(*args, taskid=self.task.batchid, cmdtype='client_list')
        # read stderr
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
        # flush stdout
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
clientlist = []
        # read the data
self._stopsigin = False
try:
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#"):
self._logger.info(f"{msg}")
continue
retcode = msg
idx = msg.find(' ')
if idx < 0:
idx = msg.find('\t')
if idx > 0:
retcode = msg[:idx].strip().rstrip()
if '#@71' in msg:
retcode = '#@71'
if retcode == '#@71':
clientlist.append(msg)
elif retcode == '#@72':
for msgline in clientlist:
if msgline is None or msgline == '':
continue
if not msgline.startswith('#@71'):
continue
tmp = msgline.replace('#@71', '').strip().rstrip()
lines = json.loads(tmp)
if lines is None or len(lines) == 0:
continue
idown_login_log_all = IdownLoginLog(
self._clientid,
self.task,
self.task.apptype)
for line in lines:
login_log = IdownLoginLog_ONE(self.task, self.task.apptype, self._userid)
if line.get('country') is not None:
login_log.country = line['country']
if line['region'].strip() != '':
login_log.region = line['region']
if line.get('dateCreated') is not None:
login_log.logintime = str(datetime.datetime.fromtimestamp(int(line['dateActive'])))
if line.get('deviceModel') is not None:
login_log.devicemodel = line.get('deviceModel')
if line['platform'].strip() != '':
login_log.platform = line['platform']
if line.get('appName') is not None:
login_log.appname = line.get('appName')
if line['appVersion'] is not None:
login_log.appversion = line['appVersion']
if line['dateActive'] is not None:
login_log.activetime = str(
int(line['dateActive']) -
int(line['dateCreated'])) + 'seconds'
if line['ip'] is not None:
login_log.ip = line['ip']
idown_login_log_all.append_innerdata(login_log)
self._logger.info('Download clientlist complete')
yield idown_login_log_all
break
else:
self._logger.info(f'{msg}')
break
except Exception:
self._logger.error(f"Download telegram client list error:{traceback.format_exc()}")
if p is not None:
p.kill()
finally:
self._stopsigin = True
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
p.poll()
p.terminate()
process_flush.join()
process_err.join()
def _download_sqlitdb(self):
"""
        This is reserved for the new interface: after the download command is sent,
        telegram keeps downloading data continuously into a folder.
        Six kinds of data end up in that folder, but the records are incomplete and
        the related task information has to be filled in.
        modify by judy 2020/05/19
        download progress   .idown_btask_back
        profile             .idown_profile
        contacts            .idown_contact
        group info          .ichat_group
        chat log            .ichat_log
        resource data       .idown_resource
:return:
"""
args = self._common_args(self.task.phone)
argnew = [
f"-jar {self.telegram}",
f"-{'download'}",
f"-{'download_public_channel'}"
]
argnew.extend(args)
p = self._run_process(str(self.java), *argnew, taskid=self.task.batchid)
        # read stderr
process_err = threading.Thread(target=self._read_err_log, args=(p,))
process_err.start()
        # flush stdout
process_flush = threading.Thread(target=self._std_flush, args=(p,))
process_flush.start()
self._stopsigin = False
try:
while True:
line = p.stdout.readline()
if line is not None and line != '':
msg: str = line.strip().rstrip()
if not msg.startswith("#") or msg.startswith("#@2000"):
self._logger.info(f"{msg}")
continue
retcode = msg
idx = msg.find(' ')
if idx < 0:
idx = msg.find('\t')
if idx > 0:
retcode = msg[:idx].strip().rstrip()
if retcode == '#@43':
self._logger.info(f'{msg}')
download_process = self._downloadprogress.findall(msg)
if len(download_process) != 0:
cmdrecmsg = f'正在下载,当前下载进度: {float(download_process[-1])}%'
self.task.progress = float(download_process[-1]) / 100
self._write_task_back(ECommandStatus.Dealing, cmdrecmsg)
elif retcode == '#@41':
self._logger.info(f'{msg}')
try:
                        # fetch the data from the database; do not break before the userid has been obtained
for result in self._get_sqlite_dbdata():
yield result
except Exception:
self._logger.error(f"Sqlite get data wrong, err:{traceback.format_exc()}")
break
else:
                        # stop if anything unexpected happens
self._logger.info(f'{msg}')
break
except:
self._logger.error(f"There was a problem downloading the database, err:{traceback.format_exc()}")
if p is not None:
p.kill()
finally:
exitcode = p.wait()
self._logger.info(f"program return code: {str(exitcode)}")
self._stopsigin = True
p.poll()
p.terminate()
process_flush.join()
process_err.join()
def _get_sqlite_dbdata(self):
"""
        This method keeps scanning the folder written by telegram
        until
        the download finishes,
        the download fails, or
        no data has appeared for 30 minutes, which means telegram has probably hung, in which case we also stop.
modify by judy 2020/05/19
:return:
"""
dbfile: Path = self.accountsdir / f'{self.task.globaltelcode + self.task.phone}' / 'database.sqlite'
if not dbfile.exists():
raise Exception(f"Db file not exists: {dbfile}")
        # likewise, download the profile data first so that userid gets assigned
try:
profile_data = self.__get_profile(str(dbfile))
for profile in profile_data:
yield profile
except:
self._logger.error(f"Get user error,err:{traceback.format_exc()}")
        # then download the contact data
try:
contact_data = self._get_users(str(dbfile))
for c_one in contact_data:
yield c_one
except:
self._logger.error(f"Get user error,err:{traceback.format_exc()}")
        # download the group data
try:
for chatsdata in self._get_chats(str(dbfile)):
yield chatsdata
except:
self._logger.error(f"Get chats error,err:{traceback.format_exc()}")
        # download the chat data
try:
for messages in self._get_messages(str(dbfile)):
yield messages
except:
self._logger.error(f"Get message error,err:{traceback.format_exc()}")
        # back up the database; reportedly incremental download is supported now, so no backup is needed
# self.mvsqlitetobak(str(dbfile))
return
def __get_download_progress(self):
"""
        Scan the folder for feedback files, take the latest progress, add the extra information and return it.
:return:
"""
pass
def __get_profile(self, sqlpath: str):
"""
        Scan the folder for data with the profile suffix, add the task information and return it.
        Because the profile contains the userid and there is only one profile file,
        the profile data is fetched first, before any of the other data.
:param sqlpath:
:return:
"""
sql = '''
select * from users where self=?
'''
par = (1,)
res = self._select_data(sqlpath, sql, par)
if len(res) == 0:
self._logger.error("No profile in db")
return
eldict = res[0]
if eldict.get('id') is None:
self._logger.error("No profile in db")
return
self._userid = eldict.get('id')
p_data = PROFILE(self._clientid, self.task, self.task.apptype, self._userid)
phone = eldict.pop('phone')
if phone is not None and phone != '':
p_data.phone = self._output_format_str(phone)
p_data.account = self._phone
nickname = b''
if eldict.get('last_name') is not None:
nickname += eldict.pop('last_name')
if eldict.get('first_name') is not None:
nickname += eldict.pop('first_name')
if nickname == b'' and eldict.get('username') is not None:
nickname += eldict.pop('username')
if nickname != b'':
p_data.nickname = self._output_format_str(nickname)
# if len(prodata) > 0:
# p_data.append_details(prodata)
yield p_data
def _get_users(self, sqlpath: str):
"""
        Scan the data with the contact suffix, add the task information and return it.
:param sqlpath:
:return:
"""
limitdata = 1000
offsetdata = 0
while True:
sql = '''SELECT
*
FROM users LIMIT ? OFFSET ?'''
pars = (
limitdata,
offsetdata,
)
offsetdata += limitdata
res = self._select_data(sqlpath, sql, pars)
if len(res) == 0:
return
all_contact = CONTACT(self._clientid, self.task, self.task.apptype)
for el in res:
try:
                    # strip out all the fields that are empty
# eldict = {k: v for k, v in el.items() if v is not None}
eldict = el
if eldict.get('id') is None:
continue
contact_one = CONTACT_ONE(self._userid, eldict.get('id'), self.task, self.task.apptype)
if eldict.get('phone') is not None:
contact_one.phone = self._output_format_str(eldict.pop('phone'))
nickname = b''
if eldict.get('last_name') is not None:
nickname += eldict.pop('last_name')
if eldict.get('first_name') is not None:
nickname += eldict.pop('first_name')
if nickname == b'' and eldict.get('username') is not None:
nickname += eldict.pop('username')
if nickname != b'':
contact_one.nickname = self._output_format_str(nickname)
if eldict.get('contact') is not None:
contact_one.isfriend = eldict.pop('contact')
if eldict.get('mutualContact') is not None:
contact_one.bothfriend = eldict.pop('mutualContact')
if eldict.get('deleted') is not None:
contact_one.isdeleted = eldict.pop('deleted')
all_contact.append_innerdata(contact_one)
except Exception:
self._logger.error(f"Get profile error,err:{traceback.format_exc()}")
continue
if all_contact.innerdata_len != 0:
yield all_contact
if len(res) < limitdata:
break
def _get_chats(self, sqlpath: str):
"""
        Scan the data with the group suffix, add the task information and return it.
:param sqlpath:
:return:
"""
limitdata = 1000
offsetdata = 0
while True:
sql = '''SELECT
*
FROM chats LIMIT ? OFFSET ?'''
pars = (
limitdata,
offsetdata,
)
offsetdata += limitdata
res = self._select_data(sqlpath, sql, pars)
if len(res) == 0:
return
            re_chatall = re.compile(r'\[(\d+)\]')
ichat_all = ICHATGROUP(self._clientid, self.task, self.task.apptype)
for el in res:
if None in el.values():
continue
try:
# self._output_format_str(el['participants'])
if el.get('participants') is None or el.get('participants') == '':
continue
chat_line = self._output_format_str(el['participants'])
chat_all = re_chatall.findall(chat_line)
userid = self._userid
ichat_data = ICHATGROUP_ONE(self.task, self.task.apptype, userid, el['id'])
ichat_data.append_participants(*chat_all)
ichat_data.grouptype = self._output_format_str(el['type'])
ichat_data.groupname = self._output_format_str(el['name'])
ichat_all.append_innerdata(ichat_data)
except Exception:
self._logger.error(f"Get a chat error,err:{traceback.format_exc()}")
continue
if ichat_all.innerdata_len > 0:
yield ichat_all
if len(res) < limitdata:
break
def _get_messages(self, sqlpath: str):
"""
        Scan the chat-log suffix data and return it with the task information added.
        Scan the resource-file suffix data and return it with the task information added.
:param sqlpath:
:return:
"""
        # get the private chat messages
for mes in self.__get_all_message_data(sqlpath):
yield mes
        # get the group/channel chat messages
for channel_mes in self.__get_all_channel_message(sqlpath):
yield channel_mes
def __get_all_message_data(self, sqlpath: str):
limitdata = 1000
offsetdata = 0
while True:
sql = '''SELECT
id,
dialog_id,
chat_id,
sender_id,
text,
time,
has_media,
media_type,
media_file
FROM messages LIMIT ? OFFSET ?'''
pars = (
limitdata,
offsetdata,
)
offsetdata += limitdata
res = self._select_data(sqlpath, sql, pars)
if len(res) == 0:
return
for messinfo in self._process_messages(res):
yield messinfo
if len(res) < limitdata:
break
def __get_all_channel_message(self, sqlpath: str):
limitdata = 1000
offsetdata = 0
while True:
sql_1 = '''SELECT
id,
message_type,
chat_id,
sender_id,
text,
time,
has_media,
media_type,
media_file
FROM Channels_Message LIMIT ? OFFSET ?'''
pars = (
limitdata,
offsetdata,
)
offsetdata += limitdata
res = self._select_data(sqlpath, sql_1, pars)
if len(res) == 0:
return
for messinfo in self._process_messages(res):
yield messinfo
if len(res) < limitdata:
break
def _process_messages(self, res):
ichat_log_all = ICHATLOG(self._clientid, self.task, self.task.apptype)
for el in res:
info = el.get('text')
if (info is None or info == b'') and el.get('has_media') is None:
continue
try:
                # channel messages can only come from the account itself
if el.get('message_type') == b'Channels_Message' and el.get('sender_id') is None:
el['sender_id'] = self._userid
if el.get('sender_id') is not None:
strname = ''
                    # private chat message
if el.get('has_media') == 1 and len(self._output_format_str(el['media_file']).split('.')) == 2:
                        # write the resource file
strname = self._output_format_str(el['media_file'])
# if len(strname.split('.')) != 2:
# self._logger.error(
# "File name error,the file has no suffixes,messagefilename:{}".format(strname))
# continue
                        # get the file extension and convert it to lower case
                        file_extension = strname.split('.')[1]
messagetypetmp = [
key for key, value in self.datatype.items()
                            if file_extension.lower() in value
]
messagetypefunc = lambda x: EResourceType.Other_Text if len(x) == 0 else x[0]
message_type = messagetypefunc(messagetypetmp)
                        # look for the local resource file and read it
resourcefile = self._write_resource_file(el, strname, message_type)
if resourcefile is None:
                            # if the file does not exist locally, just skip this record
                            # skipping is clearly not ideal, but a resource record may also carry a message,
                            # and if the file cannot be found that message is meaningless, so for now we simply do not output it
continue
yield resourcefile
else:
message_type = EResourceType.Other_Text
# ichat_log = None
                    # messagetype here uses the resource type, but ichat_log stores an int, so take the enum value
messagetype = message_type.value
if el.get('dialog_id') is not None:
                        # private chat
ichat_log = ICHATLOG_ONE(
self.task, self.task.apptype, self._userid,
messagetype, el.get('dialog_id'), 0, str(el.get('id')),
el.get('sender_id'),
str(datetime.datetime.fromtimestamp(el['time'])))
elif el.get('chat_id') is not None:
                        # group chat
ichat_log = ICHATLOG_ONE(
self.task, self.task.apptype, self._userid,
messagetype, el.get('chat_id'), 1, str(el.get('id')),
el.get('sender_id'),
str(datetime.datetime.fromtimestamp(el['time'])))
else:
continue
if strname == '' and el.get('text') is None:
                        # if there is no file and no chat text, skip this record
continue
if strname != '':
# m_f_urls = [self._output_format_str(el.get('media_file'))]
ichat_log.append_resource(resourcefile)
if el.get('text') is not None:
ichat_log.content = self._output_format_str(el.get('text'))
ichat_log_all.append_innerdata(ichat_log)
except:
self._logger.error(f"Get single messages error,err:{traceback.format_exc()}")
continue
if ichat_log_all.innerdata_len > 0:
yield ichat_log_all
else:
return
def _write_resource_file(self, el: dict, strname, resourcetype):
resource = RESOURCES(self._clientid, self.task, strname, resourcetype,
self.task.apptype)
resource.filename = strname
resource.extension = strname.split('.')[1]
strtypename = self._output_format_str(el['media_type'])
fileb, telegram_file_path = self.readtherbfile({
"filetype": strtypename,
"account": self.task.globaltelcode + self.task.phone,
"filename": strname
})
if fileb is None:
            # None means the data has not been downloaded yet, or it has already been written back and deleted
self._logger.info(f'No files were obtained locally, filename:{strname}')
return None
resource.io_stream = fileb
        # assign the on_complete callback here
resource.isdeleteable = True
resource.filepath_telegram = telegram_file_path
resource.on_complete = self.delete_complete_file
return resource
def readtherbfile(self, fileinfo: dict):
"""
        Find the files downloaded by telegram and read the file stream.
:param fileinfo:
:return:
"""
fb = None
te_path = None
if fileinfo['filetype'] == 'sticker':
stickerspath = self.accountsdir / 'stickers'
stickername = stickerspath / fileinfo['filename']
if stickername.exists():
fb = open(stickername, 'rb+')
te_path = stickername
else:
documentpath = self.accountsdir / fileinfo['account'] / 'files'
documentname = documentpath / fileinfo['filename']
if documentname.exists():
fb = open(documentname, 'rb+')
te_path = documentname
return fb, te_path
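# Illustrative sketch (not part of the original class): every method above parses
# subprocess output lines of the form "#@<code> <message>". The hypothetical helper
# below documents that inline parsing as a standalone function.
def _example_parse_retcode(msg: str) -> str:
    """Return the '#@NN' prefix of a status line, or the whole line if there is none."""
    retcode = msg
    idx = msg.find(' ')
    if idx < 0:
        idx = msg.find('\t')
    if idx > 0:
        retcode = msg[:idx].strip().rstrip()
    return retcode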
|
the-stack_106_13837
|
# -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# Switched by the request context until 1.0 to opt in deprecated
# module functionality.
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be ``None``.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is :mimetype:`application/json` this will contain the
parsed JSON data. Otherwise this will be ``None``.
The :meth:`get_json` method should be used instead.
"""
from warnings import warn
warn(DeprecationWarning('json is deprecated. '
'Use get_json() instead.'), stacklevel=2)
return self.get_json()
@property
def is_json(self):
"""Indicates if this request is JSON or not. By default a request
is considered to include JSON data if the mimetype is
:mimetype:`application/json` or :mimetype:`application/*+json`.
.. versionadded:: 0.11
"""
mt = self.mimetype
if mt == 'application/json':
return True
if mt.startswith('application/') and mt.endswith('+json'):
return True
return False
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. By default
this function will return ``None`` if the mimetype is not
:mimetype:`application/json` but this can be overridden by the
``force`` parameter. If parsing fails the
:meth:`on_json_loading_failed` method on the request object will be
invoked.
:param force: if set to ``True`` the mimetype is ignored.
:param silent: if set to ``True`` this method will fail silently
and return ``None``.
:param cache: if set to ``True`` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
# We return cached JSON only when the cache is enabled.
if cache and rv is not _missing:
return rv
if not (force or self.is_json):
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.config.get('DEBUG', False):
raise BadRequest('Failed to decode JSON object: {0}'.format(e))
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
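# Illustrative sketch (not part of Flask itself): as the docstrings above explain,
# applications customise behaviour by subclassing Request/Response and assigning
# Flask.request_class / Flask.response_class. The subclass below is hypothetical
# and exists only as an example.
class _ExampleResponse(Response):
    """A Response variant that defaults to plain text instead of HTML."""
    default_mimetype = 'text/plain'
# Usage in application code (sketch): app.response_class = _ExampleResponse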
|
the-stack_106_13838
|
from django.forms import ModelForm, TextInput
from requirements_manager.models import Requirement
class RequirementForm(ModelForm):
class Meta:
model = Requirement
fields = ["package_name", 'version']
widgets = {
"package_name": TextInput(attrs={'placeholder': 'Package name'}),
'version': TextInput(attrs={'placeholder': 'Version nr.'}),
}
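# Illustrative usage sketch (not part of the original module; assumes a view that
# receives POST data for the two fields declared above):
#   form = RequirementForm(data={'package_name': 'requests', 'version': '2.31.0'})
#   if form.is_valid():
#       form.save()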
|
the-stack_106_13840
|
from codecs import open
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="depict-control",
version="1.0",
description="Control your Depict digital art frames "
"(depict.com)",
long_description=long_description,
url="https://github.com/jkeljo/depict-control",
author="Jonathan Keljo",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Home Automation",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
],
keywords="depict",
packages=["depict_control"],
install_requires=[
"aiohttp",
"netifaces",
],
python_requires=">=3.5.1",
)
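# Standard setuptools usage (for reference, not part of the original file):
# to build, run python setup.py sdist bdist_wheel (or pip install .)
# to install, run python setup.py install (or pip install .)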
|
the-stack_106_13843
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
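# Illustrative examples (for reference only, based on the Django date-format syntax
# linked above): with these settings, 2006-10-25 14:30 renders as
# '25. October 2006 14:30' (DATETIME_FORMAT) and '25.10.2006 14:30'
# (SHORT_DATETIME_FORMAT), while the input string '25.10.2006' matches the first
# DATE_INPUT_FORMATS entry, '%d.%m.%Y'.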
|
the-stack_106_13844
|
from __future__ import absolute_import, division, print_function
import unittest
import os
import os.path
from astropy.io import fits
import numpy as np
import lvmspec.scripts.preproc
from lvmspec.preproc import preproc, _parse_sec_keyword, _clipped_std_bias
from lvmspec import io
def xy2hdr(xyslice):
'''
convert 2D slice into IRAF style [a:b,c:d] header value
e.g. xyslice2header(np.s_[0:10, 5:20]) -> '[6:20,1:10]'
'''
yy, xx = xyslice
value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop, yy.start+1, yy.stop)
return value
class TestPreProc(unittest.TestCase):
def tearDown(self):
for filename in [self.calibfile, self.rawfile, self.pixfile]:
if os.path.exists(filename):
os.remove(filename)
def setUp(self):
self.calibfile = 'test-calib-askjapqwhezcpasehadfaqp.fits'
self.rawfile = 'test-raw-askjapqwhezcpasehadfaqp.fits'
self.pixfile = 'test-pix-askjapqwhezcpasehadfaqp.fits'
primary_hdr = dict()
primary_hdr['DATE-OBS'] = '2018-09-23T08:17:03.988'
primary_hdr['DOSVER'] = 'SIM' # ICS version
hdr = dict()
hdr['CAMERA'] = 'b0'
hdr['DETECTOR'] = 'SIM' # CCD chip identifier
hdr['FEEVER'] = 'SIM' # readout electronic
#- [x,y] 1-indexed for FITS; in reality the amps will be symmetric
#- but the header definitions don't require that to make sure we are
#- getting dimensions correct
#- Dimensions per amp, not full 4-quad CCD
self.ny = ny = 500
self.nx = nx = 400
self.noverscan = nover = 50
#- BIASSEC = overscan region in raw image
#- DATASEC = data region in raw image
#- CCDSEC = where should this go in output
hdr['BIASSEC1'] = xy2hdr(np.s_[0:ny, nx:nx+nover])
hdr['DATASEC1'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['CCDSEC1'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['BIASSEC2'] = xy2hdr(np.s_[0:ny, nx+nover:nx+2*nover])
hdr['DATASEC2'] = xy2hdr(np.s_[0:ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSEC2'] = xy2hdr(np.s_[0:ny, nx:nx+nx])
hdr['BIASSEC3'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nover])
hdr['DATASEC3'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['CCDSEC3'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['BIASSEC4'] = xy2hdr(np.s_[ny:ny+ny, nx+nover:nx+2*nover])
hdr['DATASEC4'] = xy2hdr(np.s_[ny:ny+ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSEC4'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nx])
hdr['NIGHT'] = '20150102'
hdr['EXPID'] = 1
# add to header the minimal set of keywords needed to
# identify the config in the ccd_calibration.yaml file
self.primary_header = primary_hdr
self.header = hdr
self.rawimage = np.zeros((2*self.ny, 2*self.nx+2*self.noverscan))
self.offset = {'1':100.0, '2':100.5, '3':50.3, '4':200.4}
self.gain = {'1':1.0, '2':1.5, '3':0.8, '4':1.2}
self.rdnoise = {'1':2.0, '2':2.2, '3':2.4, '4':2.6}
self.quad = {
'1': np.s_[0:ny, 0:nx], '2': np.s_[0:ny, nx:nx+nx],
'3': np.s_[ny:ny+ny, 0:nx], '4': np.s_[ny:ny+ny, nx:nx+nx],
}
for amp in ('1', '2', '3', '4'):
self.header['GAIN'+amp] = self.gain[amp]
self.header['RDNOISE'+amp] = self.rdnoise[amp]
xy = _parse_sec_keyword(hdr['BIASSEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
xy = _parse_sec_keyword(hdr['DATASEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
#- raw data are integers, not floats
self.rawimage = self.rawimage.astype(np.int32)
#- Confirm that all regions were correctly offset
assert not np.any(self.rawimage == 0.0)
def test_preproc(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header)
self.assertEqual(image.pix.shape, (2*self.ny, 2*self.nx))
self.assertTrue(np.all(image.ivar <= 1/image.readnoise**2))
for amp in ('1', '2', '3', '4'):
pix = image.pix[self.quad[amp]]
rdnoise = np.median(image.readnoise[self.quad[amp]])
npixover = self.ny * self.noverscan
self.assertAlmostEqual(np.mean(pix), 0.0, delta=3*rdnoise/np.sqrt(npixover))
self.assertAlmostEqual(np.std(pix), self.rdnoise[amp], delta=0.2)
self.assertAlmostEqual(rdnoise, self.rdnoise[amp], delta=0.2)
def test_bias(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, bias=False)
bias = np.zeros(self.rawimage.shape)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, bias=bias)
fits.writeto(self.calibfile, bias)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, bias=self.calibfile)
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, bias=bias[0:10, 0:10])
def test_pixflat(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=False)
pixflat = np.ones_like(image.pix)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=pixflat)
fits.writeto(self.calibfile, pixflat)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=self.calibfile)
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=pixflat[0:10, 0:10])
def test_mask(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, mask=False)
mask = np.random.randint(0, 2, size=image.pix.shape)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, mask=mask)
self.assertTrue(np.all(image.mask == mask))
fits.writeto(self.calibfile, mask)
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, mask=self.calibfile)
self.assertTrue(np.all(image.mask == mask))
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, mask=mask[0:10, 0:10])
def test_pixflat_mask(self):
from lvmspec.maskbits import ccdmask
pixflat = np.ones((2*self.ny, 2*self.nx))
pixflat[0:10, 0:10] = 0.0
pixflat[10:20, 10:20] = 0.05
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=pixflat)
self.assertTrue(np.all(image.mask[0:10,0:10] & ccdmask.PIXFLATZERO))
self.assertTrue(np.all(image.mask[10:20,10:20] & ccdmask.PIXFLATLOW))
def test_io(self):
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header, camera='b0')
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header, camera='R1')
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header, camera='z9')
self.header['CAMERA'] = 'B1'
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header)
b0 = io.read_raw(self.rawfile, 'b0')
b1 = io.read_raw(self.rawfile, 'b1')
r1 = io.read_raw(self.rawfile, 'r1')
z9 = io.read_raw(self.rawfile, 'Z9')
self.assertEqual(b0.meta['CAMERA'], 'b0')
self.assertEqual(b1.meta['CAMERA'], 'b1')
self.assertEqual(r1.meta['CAMERA'], 'r1')
self.assertEqual(z9.meta['CAMERA'], 'z9')
def test_32_64(self):
'''
64-bit integers aren't supported for compressed HDUs;
make sure we handle that gracefully
'''
data64 = np.linspace(0, 2**60, 10, dtype=np.int64)
datasmall64 = np.linspace(0, 2**30, 10, dtype=np.int64)
data32 = np.linspace(0, 2**30, 10, dtype=np.int32)
data16 = np.linspace(0, 2**10, 10, dtype=np.int16)
#- Primary HDU should be blank
#- Should be written as vanilla ImageHDU
io.write_raw(self.rawfile, data64, self.header, primary_header = self.primary_header, camera='b0')
#- Should be written as vanilla ImageHDU
io.write_raw(self.rawfile, data64, self.header, primary_header = self.primary_header, camera='b1')
#- Should be converted to 32-bit CompImageHDU
io.write_raw(self.rawfile, datasmall64, self.header, primary_header = self.primary_header, camera='b2')
#- Should be 32-bit CompImageHDU
io.write_raw(self.rawfile, data32, self.header, primary_header = self.primary_header, camera='b3')
#- Should be 16-bit CompImageHDU
io.write_raw(self.rawfile, data16, self.header, primary_header = self.primary_header, camera='b4')
fx = fits.open(self.rawfile)
#- Blank PrimaryHDU should have been inserted
self.assertTrue(isinstance(fx[0], fits.PrimaryHDU))
        self.assertIsNone(fx[0].data)
#- 64-bit image written uncompressed after blank HDU
self.assertTrue(isinstance(fx[1], fits.ImageHDU))
self.assertEqual(fx[1].data.dtype, np.dtype('>i8'))
self.assertEqual(fx[1].header['EXTNAME'], 'B0')
#- 64-bit image written uncompressed
self.assertTrue(isinstance(fx[2], fits.ImageHDU))
self.assertEqual(fx[2].data.dtype, np.dtype('>i8'))
self.assertEqual(fx[2].header['EXTNAME'], 'B1')
#- 64-bit image with small numbers converted to 32-bit compressed
self.assertTrue(isinstance(fx[3], fits.CompImageHDU))
self.assertEqual(fx[3].data.dtype, np.int32)
self.assertEqual(fx[3].header['EXTNAME'], 'B2')
#- 32-bit image written compressed
self.assertTrue(isinstance(fx[4], fits.CompImageHDU))
self.assertEqual(fx[4].data.dtype, np.int32)
self.assertEqual(fx[4].header['EXTNAME'], 'B3')
#- 16-bit image written compressed
self.assertTrue(isinstance(fx[5], fits.CompImageHDU))
self.assertEqual(fx[5].data.dtype, np.int16)
self.assertEqual(fx[5].header['EXTNAME'], 'B4')
# not a very useful test :
# it is tested by the other tests
#def test_keywords(self):
#for keyword in self.header:
#- Missing GAIN* and RDNOISE* are warnings but not errors
# if keyword.startswith('GAIN') or keyword.startswith('RDNOISE'):
# continue
#- DATE-OBS, NIGHT, and EXPID are also optional
#- (but maybe they should be required...)
# if keyword in ('DATE-OBS', 'NIGHT', 'EXPID'):
# continue
# if os.path.exists(self.rawfile):
# os.remove(self.rawfile)
# value = self.header[keyword]
# del self.header[keyword]
# with self.assertRaises(KeyError):
# io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header)
# self.header[keyword] = value
#dateobs = self.header
#- striving for 100% coverage...
def test_pedantic(self):
with self.assertRaises(ValueError):
_parse_sec_keyword('blat')
#- should log a warning about large readnoise
rawimage = self.rawimage + np.random.normal(scale=2, size=self.rawimage.shape)
image = preproc(rawimage, self.header, primary_header = self.primary_header)
#- should log an error about huge readnoise
rawimage = self.rawimage + np.random.normal(scale=10, size=self.rawimage.shape)
image = preproc(rawimage, self.header, primary_header = self.primary_header)
#- should log a warning about small readnoise
rdnoise = 0.7 * np.mean(list(self.rdnoise.values()))
rawimage = np.random.normal(scale=rdnoise, size=self.rawimage.shape)
image = preproc(rawimage, self.header, primary_header = self.primary_header)
#- should log a warning about tiny readnoise
rdnoise = 0.01 * np.mean(list(self.rdnoise.values()))
rawimage = np.random.normal(scale=rdnoise, size=self.rawimage.shape)
image = preproc(rawimage, self.header, primary_header = self.primary_header)
#- Missing expected RDNOISE keywords shouldn't be fatal
hdr = self.header.copy()
del hdr['RDNOISE1']
del hdr['RDNOISE2']
del hdr['RDNOISE3']
del hdr['RDNOISE4']
image = preproc(self.rawimage, hdr, primary_header = self.primary_header)
#- Missing expected GAIN keywords should log error but not crash
hdr = self.header.copy()
del hdr['GAIN1']
del hdr['GAIN2']
del hdr['GAIN3']
del hdr['GAIN4']
image = preproc(self.rawimage, hdr, primary_header = self.primary_header)
def test_preproc_script(self):
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header, camera='b0')
io.write_raw(self.rawfile, self.rawimage, self.header, primary_header = self.primary_header, camera='b1')
args = ['--infile', self.rawfile, '--cameras', 'b1',
'--pixfile', self.pixfile]
if os.path.exists(self.pixfile):
os.remove(self.pixfile)
lvmspec.scripts.preproc.main(args)
img = io.read_image(self.pixfile)
self.assertEqual(img.pix.shape, (2*self.ny, 2*self.nx))
def test_clipped_std_bias(self):
'''Compare to www.wolframalpha.com integrals'''
self.assertAlmostEqual(_clipped_std_bias(1), 0.53956, places=5)
self.assertAlmostEqual(_clipped_std_bias(2), 0.879626, places=6)
self.assertAlmostEqual(_clipped_std_bias(3), 0.986578, places=6)
np.random.seed(1)
x = np.random.normal(size=1000000)
biased_std = np.std(x[np.abs(x)<3])
self.assertAlmostEqual(biased_std, _clipped_std_bias(3), places=3)
#- Not implemented yet, but flag these as expectedFailures instead of
#- successful tests of raising NotImplementedError
def test_default_bias(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, bias=True)
def test_default_pixflat(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, pixflat=True)
def test_default_mask(self):
image = preproc(self.rawimage, self.header, primary_header = self.primary_header, mask=True)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13848
|
import sys
import numpy as np
import setuptools
from distutils.core import setup
from distutils.extension import Extension
package_name = 'GeodisTK'
module_name = 'GeodisTK'
version = sys.version[0]
wrap_source = './cpp/wrap_py{0:}.cpp'.format(version)
module1 = Extension(module_name,
include_dirs = [np.get_include(), './cpp'],
sources = ['./cpp/util.cpp',
'./cpp/geodesic_distance_2d.cpp',
'./cpp/geodesic_distance_3d.cpp',
'./cpp/geodesic_distance.cpp',
wrap_source])
# Get the summary
description = 'An open-source toolkit to calculate geodesic distance' + \
' for 2D and 3D images'
# Get the long description
if(sys.version[0] == '2'):
import io
with io.open('README.md', 'r', encoding='utf-8') as f:
long_description = f.read()
else:
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name = package_name,
version = "0.1.6",
author ='Guotai Wang',
author_email = '[email protected]',
description = description,
long_description = long_description,
long_description_content_type = 'text/markdown',
url = 'https://github.com/taigw/GeodisTK',
license = 'MIT',
packages = setuptools.find_packages(),
ext_modules = [module1],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
python_requires = '>=3.6',
)
# to build, run python setup.py build or python setup.py build_ext --inplace
# to install, run python setup.py install
|
the-stack_106_13849
|
import logging
import numpy as np
import torch
result_dict = {}
result_dict_entry = ["epi_dist_mean_gt", "num_prob", "num_warped_prob", "num_matches"]
######### shared functions
# @staticmethod
def read_file_dict_to_dict(files_dict, allow_pickle=False):
# load npz
exp_dict = {}
for i, en in enumerate(files_dict):
file = files_dict[en]
exp_dict[en] = np.load(file, allow_pickle=allow_pickle)
# exp_list.append(err_dict)
print(f"len of exp_dict: {len(list(exp_dict))}")
return exp_dict
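# Illustrative usage sketch (the entry name follows the result_dict_entry list
# above; the file path is a placeholder):
#   exp_dict = read_file_dict_to_dict({"exp1": "results/exp1.npz"}, allow_pickle=True)
#   epi_dists = exp_dict["exp1"]["epi_dist_mean_gt"]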
# from utils.eval_tools import Result_processor
class Result_processor(object):
def __init__(self, result_dict_entry):
self.result_dict_all = {}
self.result_dict_entry = result_dict_entry
self.result_processed = {}
for ent in result_dict_entry:
self.result_dict_all[ent] = []
pass
def add_config(self, config):
"""
# add config: thd
"""
pass
def load_result(self, result_dict):
"""
result_dict: dictionary with entries
"""
for ent in self.result_dict_entry:
if ent not in result_dict:
logging.warning(f"{ent} not in the result dictionary")
else:
self.result_dict_all[ent].append(result_dict[ent])
# def read_to_ratio_dict(self, files_dict, allow_pickle=True):
# exp_dict = read_file_dict_to_dict(files_dict=files_dict, allow_pickle=True)
# for i, exp in enumerate(exp_dict):
# result_processor.result_dict_all = exp_dict[exp]
# result = result_processor.inlier_ratio(
# "epi_dist_mean_gt", inlier_thd, if_print=True
# )
# pass
def output_result(self, method=[None], **params):
for m in method:
if m is not None:
func = getattr(self, m)
func(params[m])
pass
# def inlier_ratio(
# self, entry, thd_list, mask_entry=None, mask_thd=1, if_print=False
# ):
    def inlier_ratio(
        self, result_list, thd_list, mask_list=None, mask_thd=1, if_print=False
    ):
        """
        Compute, for each threshold in thd_list, the fraction of estimates below it.

        input:
            result_list: nested results (a list of arrays) from one sequence
            mask_list: per-result scores (e.g. mscores for correspondences);
                when given, only entries with score < mask_thd are kept (see get_mask)
        """
# result_list = self.result_dict_all[entry]
# mask_list = None if mask_entry is None else self.result_dict_all[mask_entry]
# thd_list = np.array(thd_list)
table = []
num_corrs = []
for i, re in enumerate(result_list):
est_arr = np.array(re)
if mask_list is not None:
m = self.get_mask(mask_list[i], mask_thd)
assert (
m.shape[0] == est_arr.shape[0]
), "mask size not equal to estimated arr"
est_arr = est_arr[m]
num_corrs.append(est_arr.shape[0])
ratio_list = self.inlier_ratio_from_est(est_arr, thd_list)
table.append(ratio_list)
table = np.array(table)
# self.result_processed["inlier_ratio"] = table
results = {"inlier_ratio": table.mean(axis=0), "num_corrs": np.array(num_corrs)}
if if_print:
print(f"inlier ratio thd: {thd_list}, result: {results}")
return results
# def collect_arr_from_result(self, entry, if_print=False):
def get_entry_from_result(self, entry, if_print=False):
if entry in self.result_dict_all:
return self.result_dict_all[entry]
else:
logging.error(f"{entry} is not in the dictionary.")
def ap_inlier_thd(
self, inlier_entry, inlier_thds, mask_thds, mask_entry="mscores", if_print=False
):
table = []
num_corrs = []
for j, thd in enumerate(mask_thds):
            # get inlier ratio under the current mask threshold
            # (inlier_ratio takes the result/mask lists themselves, not entry names)
            results = self.inlier_ratio(
                self.result_dict_all[inlier_entry],
                inlier_thds,
                mask_list=self.result_dict_all[mask_entry],
                mask_thd=thd,
                if_print=if_print,
            )
table.append(results["inlier_ratio"])
num_corrs.append(results["num_corrs"])
table = np.array(table)
num_corrs = np.array(num_corrs)
print(f"table: {table.shape}")
results = {
"inlier_thd": table,
"num_corrs": num_corrs, # np [thds, Num of samples]
}
return results
pass
# def num_inlier_thd(self, inlier_entry, )
def save_result(self, filename, item):
if item == "result_dict_all":
np.savez_compressed(filename, **self.result_dict_all)
pass
@staticmethod
def get_mask(arr, thd):
return np.array(arr) < thd
def inlier_ratio_nested(self, est_nested, thd_list):
table = []
for i, re in enumerate(est_nested):
est_arr = np.array(re)
# print(f"est_arr: {est_arr}")
# num_corrs.append(est_arr.shape[0])
ratio_list = self.inlier_ratio_from_est(est_arr, thd_list)
table.append(ratio_list)
table = np.array(table)
# self.result_processed["inlier_ratio"] = table
# results = {"inlier_ratio": table.mean(axis=0), "num_corrs": np.array(num_corrs)}
return table.mean(axis=0)
@staticmethod
def inlier_ratio_from_est(est_arr, thd_list):
"""
inlier_ratio_from_est(est_arr, thd_list) -> list
"""
ratio_list = []
for thd in thd_list:
ratio = np.sum(est_arr < thd) / est_arr.shape[0]
ratio_list.append(ratio)
return ratio_list
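# Worked example (assumed numbers) for Result_processor.inlier_ratio_from_est:
#   est_arr = np.array([0.05, 0.3, 2.0, 0.8]); thd_list = [0.1, 1.0]
#   -> [0.25, 0.75]  (1 of 4 errors is below 0.1, 3 of 4 are below 1.0)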
# from Result_processor import inlier_ratio_from_est
from pathlib import Path
# from . import Result_processor
class Exp_table_processor(Result_processor):
    """
    Process the results of different sequences and sort them into LaTeX-ready tables.
    """
def __init__(self, config, seq_dict_name="seq_dict", debug=False,
if_mean=True, if_median=True, **params):
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format="[%(asctime)s %(levelname)s] %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
# level=logging.INFO,
level=level,
)
## params
self.ratio_dict = {}
# thd_list = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]
# err_mat = ['err_q', 'err_t', 'epi_dists']
self.config = config
self.thd_list = config["data"]["thresh"]["thd_list"]
print(f"thd_list: {self.thd_list}")
self.err_mat = config["data"]["err_mat"]
self.seq_dict = {}
if type(seq_dict_name) is list:
for name in seq_dict_name:
self.seq_dict.update(config["data"][name])
else:
self.seq_dict.update(config["data"][seq_dict_name])
self.files_dict = self.read_file_list(
self.seq_dict, base_path=config["data"]["base_path"]
)
self.if_mean = if_mean
self.if_median = if_median
# self.if_highlights = self.config
self.exp_dict = None # the most important dictionary
## print to check
logging.debug(f"folder_list: {self.files_dict}")
pass
# def get_
@staticmethod
def read_file_list(seq_dict, base_path="", folder_idx=0, file_idx=1):
files_dict = {}
for i, en in enumerate(seq_dict):
files_dict[en] = (
Path(base_path) / seq_dict[en][folder_idx] / seq_dict[en][file_idx]
)
return files_dict
@staticmethod
def get_mean_median(name, arr, mean=False, median=False):
result = {}
if mean:
result[name + "_mean"] = [arr.mean()]
if median:
result[name + "_median"] = [np.median(arr)]
return result
@staticmethod
def read_gt_poses(path="/data/kitti/odometry/poses", seq=10):
# import dsac_tools.utils_misc as utils_misc
filename = f"{path}/{seq}.txt"
logging.info(f"read from: {filename}")
poses = np.genfromtxt(filename).astype(np.float32).reshape(-1, 3, 4)
return poses
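    # Note: each line of a KITTI odometry pose file holds 12 floats (a flattened 3x4
    # [R | t] matrix), which is why the array is reshaped to (-1, 3, 4) above.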
@staticmethod
    def compensate_poses(poses):
        """
        Express every pose in the snippet relative to the first pose.

        input:
            poses: np[batch, 3, 4]
        """
poses = np.stack([p for p in poses])
first_pose = poses[0]
poses[:, :, -1] -= first_pose[:, -1]
compensated_poses = (
np.linalg.inv(first_pose[:, :3]) @ poses
) # [3,3] @ [batch, 3, 4]
return compensated_poses
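    # Sketch (assumed values) of what compensate_poses returns: for poses moving along z,
    #   poses = np.stack([np.hstack([np.eye(3), [[0.0], [0.0], [float(i)]]]) for i in range(3)])
    #   rel = Exp_table_processor.compensate_poses(poses)
    #   # rel[0][:, -1] == [0, 0, 0] and rel[2][:, -1] == [0, 0, 2]: the snippet starts at identity.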
@staticmethod
def get_abs_poses(poses, if_print=False):
# table_processor.exp_dict['Si-D-k-f.k']['relative_poses_body']
poses_abs = []
poses_abs.append(np.identity(4)[:3])
last_pose = np.identity(4)
from numpy.linalg import inv
# for i, pose in enumerate(table_processor.exp_dict['Si-D-k-f.k']['relative_poses_body']):
for i, pose in enumerate(poses):
# print(f"{i}: {pose}")
last_pose = pose @ last_pose ## make sure the multiplication order.
poses_abs.append(inv(last_pose)[:3])
if if_print and i < 5:
print(f"pose abs: {poses_abs[-1]}")
poses_abs = np.array(poses_abs)
logging.info(f"get abs poses: {poses_abs.shape}")
return poses_abs
def get_all_abs_poses(self, item="relative_poses_body"):
poses_dict = {}
exp_dict = self.exp_dict
for i, exp in enumerate(exp_dict):
poses = exp_dict[exp][item]
poses_dict.update({exp: self.get_abs_poses(poses, if_print=False)})
self.poses_dict = poses_dict
logging.info(f"get poses from: {list(poses_dict)}")
return poses_dict
@staticmethod
def export_poses(poses_dict, path="logs/", prefix="", postfix="_date"):
folder = Path(path)
folder.mkdir(parents=True, exist_ok=True)
for i, exp in enumerate(poses_dict):
poses = poses_dict[exp]
### avoid using '.' in filename
exp_outname = exp.replace(".", "_")
filename = f"{path}/{prefix}{exp_outname}{postfix}.txt"
logging.info(f"save poses to: {filename}")
np.savetxt(filename, np.stack(poses).reshape(-1, 12), delimiter=" ")
@staticmethod
def compute_pose_error(gt, pred):
RE = 0
snippet_length = gt.shape[0]
scale_factor = np.sum(gt[:, :, -1] * pred[:, :, -1]) / np.sum(
pred[:, :, -1] ** 2
)
ATE = np.linalg.norm((gt[:, :, -1] - scale_factor * pred[:, :, -1]).reshape(-1))
for gt_pose, pred_pose in zip(gt, pred):
# Residual matrix to which we compute angle's sin and cos
R = gt_pose[:, :3] @ np.linalg.inv(pred_pose[:, :3])
s = np.linalg.norm(
[R[0, 1] - R[1, 0], R[1, 2] - R[2, 1], R[0, 2] - R[2, 0]]
)
c = np.trace(R) - 1
# Note: we actually compute double of cos and sin, but arctan2 is invariant to scale
RE += np.arctan2(s, c)
# return ATE/snippet_length, RE/snippet_length
return {
"ATE": ATE / snippet_length,
"RE": RE / snippet_length,
"scale_factor": scale_factor,
}
@staticmethod
def pose_seq_ate(est_poses, gt_poses, seq_length=5):
"""
# compute absolute translation error on small snippets
input:
est_poses: np[N, 3, 4]
gt_poses: np[N, 3, 4]
"""
assert len(est_poses) <= len(gt_poses)
# from evaluations.pose_evaluation_utils import poses_compensate
est_length = len(est_poses) - seq_length
errors = np.zeros((est_length, 2), np.float32)
scale_factors = []
algined_poses = []
for i in range(est_length):
est_pose_snip = Exp_table_processor.compensate_poses(
est_poses[i : i + seq_length]
)
gt_pose_snip = Exp_table_processor.compensate_poses(
gt_poses[i : i + seq_length]
)
results = Exp_table_processor.compute_pose_error(
est_pose_snip, gt_pose_snip
)
errors[i] = results["ATE"], results["RE"]
pose = np.copy(est_poses[i])
pose[:, -1] = pose[:, -1] * results["scale_factor"] ### buggy
algined_poses.append(pose)
scale_factors.append(results["scale_factor"])
mean_errors = errors.mean(0)
std_errors = errors.std(0)
error_names = ["ATE", "RE"]
print("")
print("Results")
print("\t {:>10}, {:>10}".format(*error_names))
print("mean \t {:10.4f}, {:10.4f}".format(*mean_errors))
print("std \t {:10.4f}, {:10.4f}".format(*std_errors))
return {
"errors": errors,
"scale_factors": scale_factors,
"aligned_poses": algined_poses,
}
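    # Typical call pattern (hypothetical experiment key and paths), combining the pose helpers:
    #   gt_poses = Exp_table_processor.read_gt_poses(path="/data/kitti/odometry/poses", seq=10)
    #   est_poses = table_processor.get_all_abs_poses()["Si-D-k-f.k"]
    #   stats = Exp_table_processor.pose_seq_ate(est_poses, gt_poses, seq_length=5)
    #   # stats["errors"][:, 0] -> per-snippet ATE, stats["errors"][:, 1] -> per-snippet RE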
def get_entry_mean_med(self, mean_mat, if_print=False, allow_pickle=False):
ratio_dict = self.ratio_dict
# exp_names = list(self.files_dict)
# exp_dict = read_file_dict_to_dict(self.files_dict, allow_pickle)
exp_dict = self.exp_dict
# assert self.exp_dict is not None
for i, exp in enumerate(exp_dict):
for en in mean_mat:
arr = exp_dict[exp][en]
arr = np.array(arr)
print(f"arr: {arr}")
# get mean
temp_dict = self.get_mean_median(
en, arr.flatten(), mean=True, median=True
)
print(f"temp_dict: {temp_dict}")
ratio_dict[exp].update(temp_dict)
self.ratio_dict = ratio_dict
print(f"ratio_dict: {ratio_dict}")
pass
def get_result_dict(self, if_print=False, nested=False, allow_pickle=False):
ratio_dict = {}
# read exps
err_mat = self.err_mat
exp_names = list(self.files_dict)
exp_dict = read_file_dict_to_dict(self.files_dict, allow_pickle)
self.exp_dict = exp_dict
thd_list = self.thd_list
print(f"err_mat: {err_mat}, exp_names: {exp_names}")
assert len(list(exp_dict)) == len(exp_names)
# loop through experiments
for i, exp in enumerate(exp_dict):
name = exp # exp_names[i]
ratio_dict[name] = {}
if if_print:
print(f"- name: {name}")
# loop through metrics
for en in err_mat:
arr = exp_dict[exp][en]
print(f"exp_dict[exp]: {list(exp_dict[exp])}")
# print(f"{exp}: {en}, {arr.shape}, ")
# print(f"{exp}: {en}, {arr[0].shape}, {type(arr[0])}")
# if arr[0] is np.ndarray:
if nested:
# ratio_list = self.inlier_ratio_nested(arr, thd_list)
results = self.inlier_ratio(
arr, thd_list, if_print=True, mask_list=exp_dict[exp]['mscores'], mask_thd=1.0
# "epi_dist_mean_gt", inlier_thd, if_print=True,
)
ratio_list = results['inlier_ratio']
else:
ratio_list = self.inlier_ratio_from_est(arr.reshape(-1, 1), thd_list)
ratio_list = np.array(ratio_list)
ratio_dict[name][en] = ratio_list
# get mean and median
if self.if_mean or self.if_median:
temp_dict = self.get_mean_median(
en, arr.flatten(), mean=self.if_mean, median=self.if_median
)
ratio_dict[name].update(temp_dict)
if if_print:
print(f" - exp: {en}")
# print(f"arr shape: {arr.shape}")
if if_print:
print(f"{ratio_list}")
if if_print:
print(f"ratio_dict: {ratio_dict}")
self.ratio_dict = ratio_dict
pass
@staticmethod
def get_highlights_table(reverse_arr, line_list, top_k=1):
## highligh the numbers
# top_k = 2
# reverse_arr = np.array([1, -1, -1]*4)
table_nums = np.array(line_list) * reverse_arr
def find_orders(x): # for 1d array
"""
# https://github.com/numpy/numpy/issues/8757
"""
idx = np.empty(len(x), np.intp)
idx[np.argsort(x)] = np.arange(len(x))[::-1] ## reverse order, 0 is largest
return idx
table_argsort = np.array([find_orders(nums) for nums in table_nums.transpose()])
table_highlights = table_argsort.transpose() < top_k
# print(f"table_argsort: {table_argsort}")
# print(f"table_argsort: {table_highlights}")
return {"table_argsort": table_argsort, "table_highlights": table_highlights}
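    # Small worked example (assumed numbers) for get_highlights_table: with
    #   reverse_arr = np.array([1, -1])   # higher is better for column 0, lower for column 1
    #   line_list   = [[0.9, 0.2], [0.7, 0.1]]
    # and top_k=1, "table_highlights" marks row 0 in column 0 (largest 0.9) and row 1 in
    # column 1 (smallest 0.1), i.e. the entries print_tables renders in bold.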
def print_tables(self, if_name=True, table_list=[""], if_print=False):
def get_numbers(result_dict, entry_dict):
nums = []
for i, en in enumerate(entry_dict):
idxs = entry_dict[en]
if if_print: print(f"entry: {en}, idxs: {idxs}, nums: {nums}")
nums.extend([result_dict[en][i] for i in idxs])
return nums
pass
def list_to_style(item, a_list, seperator=" & ", highlights=None):
if highlights is None:
highlights = np.zeros(len(a_list))
highli = lambda x, hi: f"\\textbf{{{x:.3f}}}" if hi else f"{x:.3f}"
line = (
f"{item} & "
+ seperator.join([highli(i, hi) for (i, hi) in zip(a_list, highlights)])
+ "\n"
)
line += "\\\\ \\hline"
return line
# table_body.append(line)
## mapping models
ratio_dict = self.ratio_dict
name_dict = self.config["data"]["symbol_dict"]["models"] if if_name else None
models_desc = self.config["data"]["symbol_dict"]["models"]
for table in table_list:
table_body = []
spec = self.config["output"].get(table, None)
if spec is None:
continue
## print out the corresponding results
sep = spec["sep"]
name_list = []
line_list = []
for i, r in enumerate(spec["row"]):
line = []
for j, c in enumerate(spec["col"]):
seq_name = f"{r+sep+c}"
line.extend(get_numbers(ratio_dict[seq_name], spec["entries"]))
# name = name_dict[r] if if_name is not None else r
# name = seq_name
name = name_dict[r][0] if if_name is not None else r
name_list.append(name)
line_list.append(line)
# table_row = list_to_style(item=name, a_list=line)
# table_body.append(table_row)
if if_print: print(f"line_list: {line_list}")
highlights = spec.get('highlight', False)
if highlights == True:
reverse_arr = np.array([1, -1, -1] * 2)
data = self.get_highlights_table(reverse_arr, line_list, top_k=2)
table_highlights = data["table_highlights"]
else:
table_highlights = np.zeros_like(np.array(line_list))
table_body = [
list_to_style(item=name, a_list=line, highlights=hi)
for (name, line, hi) in zip(name_list, line_list, table_highlights)
]
table_ready = "\n".join(table_body)
## just print
print(f"table: {table}")
print(table_ready)
pass
@staticmethod
    def plot_table_for_metrics(exp, constraint=None, if_table=True):
        """
        Build a LaTeX table body for one metric across all experiments.

        input:
            constraint: list [the indices of the thresholds to extract]
        note: relies on `exp_names`, `ratio_dict`, `table_begin` and `table_end`
            being defined in the enclosing scope (e.g. a notebook session); they
            are not attributes of this class.
        """
seperator = " & "
print(f"=== exp: {exp} ===")
table_body = []
for i, name in enumerate(exp_names):
exp_dict = ratio_dict[name]
line = ""
# for exp in err_mat:
a_list = [f"{i:.3f}" for i in exp_dict[exp]]
if constraint is not None:
a_list = [a_list[i] for i in constraint]
a_list = np.array(a_list).tolist()
# print(a_list)
line += f"({i+1}) & " + seperator.join(a_list) + "\n"
line = f"" + line + "\\\\ \\hline"
table_body.append(line)
# print(f"line: {line}")
# titles
if if_table:
table = table_begin + "\n".join(table_body) + table_end
else:
table = "\n".join(table_body)
return table
@staticmethod
def read_file_to_list(folder_list, result_names, allow_pickle=False):
# load npz
assert len(folder_list) == len(result_names)
exp_list = []
for exp, result_name in zip(folder_list, result_names):
err_dict = np.load(exp + "/" + result_name, allow_pickle=allow_pickle)
exp_list.append(err_dict)
        print(f"len of exp_list: {len(exp_list)}")
        return exp_list
class Val_pipeline_frontend(object):
def __init__(self, config, device="cpu"):
self.config = config
self.net_dict = {}
self.device = device
## for net_SP
self.net_SP_helper = {}
self.set_reproduce(self.config["training"]["reproduce"])
self.base_folder = "./"
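    # Minimal construction sketch (assuming a parsed YAML config with the keys used below):
    #   val = Val_pipeline_frontend(config, device="cuda:0")
    #   val.load_net_deepF("net_deepF")
    #   if config["model"]["if_SP"]:
    #       val.load_net_SP("net_SP")
    #   val.net_toeval()
    #   val.run_eval(sample, plot_corr_list=["random"], plot_epipolar_list=["mask_conf"])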
@staticmethod
def set_reproduce(reproduce=False):
if reproduce == True:
# if config["training"]["reproduce"]:
logging.info("reproduce = True")
torch.manual_seed(0)
np.random.seed(0)
print(f"test random # : np({np.random.rand(1)}), torch({torch.rand(1)})")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def add_net(self, name, net):
self.net_dict[name] = net
def net_toeval(self):
"""
# convert all the nets to eval model
"""
for i, name in enumerate(self.net_dict):
self.net_dict[name] = self.net_dict[name].eval()
pass
def load_net_deepF(self, name="net_deepF"):
from train_good_corr_4_vals_goodF_baseline import prepare_model
from utils.loader import modelLoader
device = self.device
config = self.config
img_zoom_xy = (
config["data"]["preprocessing"]["resize"][1]
/ config["data"]["image"]["size"][1],
config["data"]["preprocessing"]["resize"][0]
/ config["data"]["image"]["size"][0],
)
model_params = {
"depth": config["model"]["depth"],
"img_zoom_xy": img_zoom_xy,
"image_size": config["data"]["image"]["size"],
"quality_size": config["model"]["quality_size"],
"if_quality": config["model"]["if_quality"],
"if_img_des_to_pointnet": config["model"]["if_img_des_to_pointnet"],
"if_goodCorresArch": config["model"]["if_goodCorresArch"],
"if_img_feat": config["model"]["if_img_feat"],
"if_cpu_svd": config["model"]["if_cpu_svd"],
"if_learn_offsets": config["model"]["if_learn_offsets"],
"if_tri_depth": config["model"]["if_tri_depth"],
"if_sample_loss": config["model"]["if_sample_loss"],
}
net = modelLoader(config["model"]["name"], **model_params)
net, optimizer, n_iter, n_iter_val = prepare_model(
config, net, device, n_iter=0, n_iter_val=0, net_postfix=""
)
self.net_dict[name] = net
pass
## superpoint
def load_net_SP(self, name="net_SP"):
config = self.config
device = self.device
SP_params = {
"out_num_points": 2000,
"patch_size": 5,
"device": device,
"nms_dist": 4,
"conf_thresh": 0.015,
}
from superpoint.models.model_utils import SuperPointNet_process
from superpoint.models.model_wrap import PointTracker
from superpoint.models.SuperPointNet_gauss2 import SuperPointNet_gauss2
from train_good_corr_4_vals_goodF_baseline import prepare_model
# nn_thresh = config['training']['SP_params']['nn_thresh']
SP_processer = SuperPointNet_process(**SP_params)
SP_tracker = PointTracker(
max_length=2, nn_thresh=config["training"]["SP_params"]["nn_thresh"]
)
net_SP = SuperPointNet_gauss2()
net_SP, optimizer_SP, n_iter_SP, n_iter_val_SP = prepare_model(
config,
net_SP,
device,
n_iter=0,
n_iter_val=0,
net_postfix="_SP",
train=False,
)
logging.info("+++[Train]+++ training superpoint")
## put to class
self.net_SP_helper = {"SP_processer": SP_processer, "SP_tracker": SP_tracker}
self.net_dict[name] = net_SP
pass
def eval_one_sample(self, sample):
import torch
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_opencv as utils_opencv # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_vis as utils_vis # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_misc as utils_misc # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_geo as utils_geo # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
from train_good_utils import val_rt, get_matches_from_SP
# params
config = self.config
net_dict = self.net_dict
if_SP = self.config["model"]["if_SP"]
if_quality = self.config["model"]["if_quality"]
device = self.device
net_SP_helper = self.net_SP_helper
task = "validating"
imgs = sample["imgs"] # [batch_size, H, W, 3]
Ks = sample["K"].to(device) # [batch_size, 3, 3]
K_invs = sample["K_inv"].to(device) # [batch_size, 3, 3]
batch_size = Ks.size(0)
scene_names = sample["scene_name"]
frame_ids = sample["frame_ids"]
scene_poses = sample[
"relative_scene_poses"
] # list of sequence_length tensors, which with size [batch_size, 4, 4]; the first being identity, the rest are [[R; t], [0, 1]]
if config["data"]["read_what"]["with_X"]:
Xs = sample[
"X_cam2s"
] # list of [batch_size, 3, Ni]; only support batch_size=1 because of variable points Ni for each sample
# sift_kps, sift_deses = sample['sift_kps'], sample['sift_deses']
assert sample["get_flags"]["have_matches"][
0
].numpy(), "Did not find the corres files!"
matches_all, matches_good = sample["matches_all"], sample["matches_good"]
quality_all, quality_good = sample["quality_all"], sample["quality_good"]
delta_Rtijs_4_4 = scene_poses[
1
].float() # [batch_size, 4, 4], asserting we have 2 frames where scene_poses[0] are all identities
E_gts, F_gts = sample["E"], sample["F"]
pts1_virt_normalizedK, pts2_virt_normalizedK = (
sample["pts1_virt_normalized"].to(device),
sample["pts2_virt_normalized"].to(device),
)
pts1_virt_ori, pts2_virt_ori = (
sample["pts1_virt"].to(device),
sample["pts2_virt"].to(device),
)
# pts1_virt_ori, pts2_virt_ori = sample['pts1_velo'].to(device), sample['pts2_velo'].to(device)
# Get and Normalize points
if if_SP:
net_SP = net_dict["net_SP"]
SP_processer, SP_tracker = (
net_SP_helper["SP_processer"],
net_SP_helper["SP_tracker"],
)
xs, offsets, quality = get_matches_from_SP(
sample["imgs_grey"], net_SP, SP_processer, SP_tracker
)
matches_use = xs + offsets
# matches_use = xs + offsets
quality_use = quality
else:
# Get and Normalize points
matches_use = matches_good # [SWITCH!!!]
quality_use = quality_good.to(device) if if_quality else None # [SWITCH!!!]
## process x1, x2
matches_use = matches_use.to(device)
N_corres = matches_use.shape[1] # 1311 for matches_good, 2000 for matches_all
x1, x2 = (
matches_use[:, :, :2],
matches_use[:, :, 2:],
) # [batch_size, N, 2(W, H)]
x1_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x1).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
x2_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x2).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
matches_use_normalizedK = torch.cat((x1_normalizedK, x2_normalizedK), 2)
matches_use_ori = torch.cat((x1, x2), 2)
# Get image feats
if config["model"]["if_img_feat"]:
imgs = sample["imgs"] # [batch_size, H, W, 3]
imgs_stack = ((torch.cat(imgs, 3).float() - 127.5) / 127.5).permute(
0, 3, 1, 2
)
qs_scene = sample["q_scene"].to(device) # [B, 4, 1]
ts_scene = sample["t_scene"].to(device) # [B, 3, 1]
qs_cam = sample["q_cam"].to(device) # [B, 4, 1]
ts_cam = sample["t_cam"].to(device) # [B, 3, 1]
t_scene_scale = torch.norm(ts_scene, p=2, dim=1, keepdim=True)
# image_height, image_width = config['data']['image']['size'][0], config['data']['image']['size'][1]
# mask_x1 = (matches_use_ori[:, :, 0] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 0] < (image_width/8.*5.)).byte()
# mask_x2 = (matches_use_ori[:, :, 2] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 2] < (image_width/8.*5.)).byte()
# mask_y1 = (matches_use_ori[:, :, 1] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 1] < (image_height/8.*5.)).byte()
# mask_y2 = (matches_use_ori[:, :, 3] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 3] < (image_height/8.*5.)).byte()
# mask_center = (~(mask_x1 & mask_y1)) & (~(mask_x2 & mask_y2))
# matches_use_ori = (mask_center.float()).unsqueeze(-1) * matches_use_ori + torch.tensor([image_width/2., image_height/2., image_width/2., image_height/2.]).to(device).unsqueeze(0).unsqueeze(0) * (1- (mask_center.float()).unsqueeze(-1))
# x1, x2 = matches_use_ori[:, :, :2], matches_use_ori[:, :, 2:] # [batch_size, N, 2(W, H)]
data_batch = {
"matches_xy_ori": matches_use_ori,
"quality": quality_use,
"x1_normalizedK": x1_normalizedK,
"x2_normalizedK": x2_normalizedK,
"Ks": Ks,
"K_invs": K_invs,
"matches_good_unique_nums": sample["matches_good_unique_nums"],
"t_scene_scale": t_scene_scale,
}
# loss_params = {'model': config['model']['name'], 'clamp_at':config['model']['clamp_at'], 'depth': config['model']['depth']}
loss_params = {
"model": config["model"]["name"],
"clamp_at": config["model"]["clamp_at"],
"depth": config["model"]["depth"],
}
with torch.no_grad():
outs = net_dict["net_deepF"](data_batch)
pts1_eval, pts2_eval = pts1_virt_ori, pts2_virt_ori
# logits = outs['logits'] # [batch_size, N]
# logits_weights = F.softmax(logits, dim=1)
logits_weights = outs["weights"]
loss_E = 0.0
F_out, T1, T2, out_a = (
outs["F_est"],
outs["T1"],
outs["T2"],
outs["out_layers"],
)
pts1_eval = torch.bmm(T1, pts1_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
pts2_eval = torch.bmm(T2, pts2_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
# pts1_eval = utils_misc._homo(F.normalize(pts1_eval[:, :, :2], dim=2))
# pts2_eval = utils_misc._homo(F.normalize(pts2_eval[:, :, :2], dim=2))
loss_layers = []
losses_layers = []
# losses = utils_F.compute_epi_residual(pts1_eval, pts2_eval, F_est, loss_params['clamp_at']) #- res.mean()
# losses_layers.append(losses)
# loss_all = losses.mean()
# loss_layers.append(loss_all)
out_a.append(F_out)
loss_all = 0.0
for iter in range(loss_params["depth"]):
losses = utils_F.compute_epi_residual(
pts1_eval, pts2_eval, out_a[iter], loss_params["clamp_at"]
)
# losses = utils_F._YFX(pts1_eval, pts2_eval, out_a[iter], if_homo=True, clamp_at=loss_params['clamp_at'])
losses_layers.append(losses)
loss = losses.mean()
loss_layers.append(loss)
loss_all += loss
loss_all = loss_all / len(loss_layers)
F_ests = T2.permute(0, 2, 1).bmm(F_out.bmm(T1))
E_ests = Ks.transpose(1, 2) @ F_ests @ Ks
last_losses = losses_layers[-1].detach().cpu().numpy()
print(last_losses)
print(np.amax(last_losses, axis=1))
# E_ests_list = []
# for x1_single, x2_single, K, w in zip(x1, x2, Ks, logits_weights):
# E_est = utils_F._E_from_XY(x1_single, x2_single, K, torch.diag(w))
# E_ests_list.append(E_est)
# E_ests = torch.stack(E_ests_list).to(device)
# F_ests = utils_F._E_to_F(E_ests, Ks)
K_np = Ks.cpu().detach().numpy()
x1_np, x2_np = x1.cpu().detach().numpy(), x2.cpu().detach().numpy()
E_est_np = E_ests.cpu().detach().numpy()
F_est_np = F_ests.cpu().detach().numpy()
delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4.cpu().numpy()
# Tests and vis
idx = 0
img1 = imgs[0][idx].numpy().astype(np.uint8)
img2 = imgs[1][idx].numpy().astype(np.uint8)
img1_rgb, img2_rgb = img1, img2
img1_rgb_np, img2_rgb_np = img1, img2
im_shape = img1.shape
x1 = x1_np[idx]
x2 = x2_np[idx]
# utils_vis.draw_corr(img1, img2, x1, x2)
delta_Rtij = delta_Rtijs_4_4_cpu_np[idx]
print("----- delta_Rtij", delta_Rtij)
delta_Rtij_inv = np.linalg.inv(delta_Rtij)
K = K_np[idx]
F_gt_th = F_gts[idx].cpu()
F_gt = F_gt_th.numpy()
E_gt_th = E_gts[idx].cpu()
E_gt = E_gt_th.numpy()
F_est = F_est_np[idx]
E_est = E_est_np[idx]
unique_rows_all, unique_rows_all_idxes = np.unique(
np.hstack((x1, x2)), axis=0, return_index=True
)
angle_R = utils_geo.rot12_to_angle_error(np.eye(3), delta_Rtij_inv[:3, :3])
angle_t = utils_geo.vector_angle(
np.array([[0.0], [0.0], [1.0]]), delta_Rtij_inv[:3, 3:4]
)
print(
">>>>>>>>>>>>>>>> Between frames: The rotation angle (degree) %.4f, and translation angle (degree) %.4f"
% (angle_R, angle_t)
)
def plot_corrs(mask_sample, title="corres."):
utils_vis.draw_corr(
img1_rgb,
img2_rgb,
x1[mask_sample],
x2[mask_sample],
linewidth=2.0,
title=title,
)
pass
num = 100
plot_corrs(
mask_sample=np.random.choice(x1.shape[0], num),
title=f"Sample of {num} corres.",
)
# ## Baseline: 8-points
# M_8point, error_Rt_8point, mask2_8point, E_est_8point = utils_opencv.recover_camera_opencv(K, x1, x2, delta_Rtij_inv, five_point=False, threshold=0.01)
## Baseline: 5-points
five_point = False
M_opencv, error_Rt_opencv, mask2, E_return = utils_opencv.recover_camera_opencv(
K, x1, x2, delta_Rtij_inv, five_point=five_point, threshold=0.01
)
if five_point:
E_est_opencv = E_return
F_est_opencv = utils_F.E_to_F_np(E_est_opencv, K)
else:
E_est_opencv, F_est_opencv = E_return[0], E_return[1]
#################
        print("")  # "----- OpenCV %s (%d unique inliers)" % (opencv_name, unique_rows.shape[0])
        mask_sample = mask2
utils_vis.show_epipolar_rui_gtEst(
x2[mask_sample, :],
x1[mask_sample, :],
img2_rgb,
img1_rgb,
F_gt.T,
F_est_opencv.T,
weights=None,
im_shape=im_shape,
title_append="OpenCV 5-point with its inliers",
)
###################
## Check geo dists
print(f"K: {K}")
x1_normalizedK = utils_misc.de_homo_np(
(np.linalg.inv(K) @ utils_misc.homo_np(x1).T).T
)
x2_normalizedK = utils_misc.de_homo_np(
(np.linalg.inv(K) @ utils_misc.homo_np(x2).T).T
)
K_th = torch.from_numpy(K)
F_gt_normalized = K_th.t() @ F_gt_th @ K_th # Should be identical to E_gts[idx]
geo_dists = utils_F._sym_epi_dist(
F_gt_normalized,
torch.from_numpy(x1_normalizedK),
torch.from_numpy(x2_normalizedK),
).numpy()
geo_thres = 1e-4
mask_in = geo_dists < geo_thres
mask_out = geo_dists >= geo_thres
mask_sample = mask2
print(mask2.shape)
np.set_printoptions(precision=8, suppress=True)
## Ours: Some analysis
        print("----- Ours")
        # compute the scores in the method scope so they stay available after plotting
        scores_ori = logits_weights.cpu().numpy().flatten()

        def plot_score_hist(scores):
            import matplotlib.pyplot as plt

            plt.hist(scores, 100)
            plt.show()

        plot_score_hist(scores_ori)
        sort_idxes = np.argsort(scores_ori[unique_rows_all_idxes])[::-1]
        scores = scores_ori[unique_rows_all_idxes][sort_idxes]
num_corr = 100
mask_conf = sort_idxes[:num_corr]
# mask_sample = np.array(range(x1.shape[0]))[mask_sample][:20]
# utils_vis.draw_corr(
# img1_rgb,
# img2_rgb,
# x1[unique_rows_all_idxes],
# x2[unique_rows_all_idxes],
# linewidth=2.0,
# title=f"All {unique_rows_all_idxes.shape[0]} correspondences",
# )
plot_corrs(
mask_sample=unique_rows_all_idxes,
title=f"All {unique_rows_all_idxes.shape[0]} corres.",
)
utils_vis.draw_corr(
img1_rgb,
img2_rgb,
x1[unique_rows_all_idxes][mask_conf, :],
x2[unique_rows_all_idxes][mask_conf, :],
linewidth=2.0,
title=f"Ours top {num_corr} confidents",
)
# print('(%d unique corres)'%scores.shape[0])
utils_vis.show_epipolar_rui_gtEst(
x2[unique_rows_all_idxes][mask_conf, :],
x1[unique_rows_all_idxes][mask_conf, :],
img2_rgb,
img1_rgb,
F_gt.T,
F_est.T,
weights=scores_ori[unique_rows_all_idxes][mask_conf],
im_shape=im_shape,
title_append="Ours top %d with largest score points" % mask_conf.shape[0],
)
def print_val():
print(f"F_gt: {F_gt/F_gt[2, 2]}")
print(f"F_est: {F_est/F_est[2, 2]}")
error_Rt_est_ours, epi_dist_mean_est_ours, _, _, _, _, _, M_estW = val_rt(
idx,
K,
x1,
x2,
E_est,
E_gt,
F_est,
F_gt,
delta_Rtij,
five_point=False,
if_opencv=False,
)
print(
"Recovered by ours (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f"
% (error_Rt_est_ours[0], error_Rt_est_ours[1])
)
# print(epi_dist_mean_est_ours, np.mean(epi_dist_mean_est_ours))
print(
"%.2f, %.2f"
% (
np.sum(epi_dist_mean_est_ours < 0.1)
/ epi_dist_mean_est_ours.shape[0],
np.sum(epi_dist_mean_est_ours < 1)
/ epi_dist_mean_est_ours.shape[0],
)
)
## OpenCV: Some analysis
corres = np.hstack((x1[mask_sample, :], x2[mask_sample, :]))
unique_rows = np.unique(corres, axis=0) if corres.shape[0] > 0 else corres
opencv_name = "5-point" if five_point else "8-point"
utils_vis.draw_corr(
img1_rgb,
img2_rgb,
x1[mask_sample, :],
x2[mask_sample, :],
linewidth=2.0,
title=f"OpenCV {opencv_name} inliers",
)
print(
"----- OpenCV %s (%d unique inliers)" % (opencv_name, unique_rows.shape[0])
)
utils_vis.show_epipolar_rui_gtEst(
x2[mask_sample, :],
x1[mask_sample, :],
img2_rgb,
img1_rgb,
F_gt.T,
F_est_opencv.T,
weights=None,
im_shape=im_shape,
title_append="OpenCV 5-point with its inliers",
)
print(F_gt / F_gt[2, 2])
print(F_est_opencv / F_est_opencv[2, 2])
error_Rt_est_5p, epi_dist_mean_est_5p, _, _, _, _, _, M_estOpenCV = val_rt(
idx,
K,
x1,
x2,
E_est_opencv,
E_gt,
F_est_opencv,
F_gt,
delta_Rtij,
five_point=False,
if_opencv=False,
)
print(
"Recovered by OpenCV %s (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f"
% (opencv_name, error_Rt_est_5p[0], error_Rt_est_5p[1])
)
print(
"%.2f, %.2f"
% (
np.sum(epi_dist_mean_est_5p < 0.1) / epi_dist_mean_est_5p.shape[0],
np.sum(epi_dist_mean_est_5p < 1) / epi_dist_mean_est_5p.shape[0],
)
)
# dict_of_lists['opencv5p'].append((np.sum(epi_dist_mean_est_5p<0.1)/epi_dist_mean_est_5p.shape[0], np.sum(epi_dist_mean_est_5p<1)/epi_dist_mean_est_5p.shape[0]))
# dict_of_lists['ours'].append((np.sum(epi_dist_mean_est_ours<0.1)/epi_dist_mean_est_ours.shape[0], np.sum(epi_dist_mean_est_ours<1)/epi_dist_mean_est_ours.shape[0]))
print("+++ GT, Opencv_5p, Ours")
np.set_printoptions(precision=4, suppress=True)
print(delta_Rtij_inv[:3])
print(
np.hstack(
(
M_opencv[:, :3],
M_opencv[:, 3:4] / M_opencv[2, 3] * delta_Rtij_inv[2, 3],
)
)
)
print(
np.hstack(
(M_estW[:, :3], M_estW[:, 3:4] / M_estW[2, 3] * delta_Rtij_inv[2, 3])
)
)
return {"img1_rgb": img1_rgb, "img2_rgb": img2_rgb, "delta_Rtij": delta_Rtij}
def eval_one_sift(self, sample, it=0, save_folder="plots/vis_paper/"):
import torch
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_opencv as utils_opencv # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_vis as utils_vis # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_misc as utils_misc # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_geo as utils_geo # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
from train_good_utils import val_rt, get_matches_from_SP
# params
config = self.config
net_dict = self.net_dict
if_SP = self.config["model"]["if_SP"]
if_quality = self.config["model"]["if_quality"]
device = self.device
net_SP_helper = self.net_SP_helper
Path(save_folder).mkdir(parents=True, exist_ok=True)
task = "validating"
imgs = sample["imgs"] # [batch_size, H, W, 3]
Ks = sample["K"].to(device) # [batch_size, 3, 3]
K_invs = sample["K_inv"].to(device) # [batch_size, 3, 3]
batch_size = Ks.size(0)
scene_names = sample["scene_name"]
frame_ids = sample["frame_ids"]
scene_poses = sample[
"relative_scene_poses"
] # list of sequence_length tensors, which with size [batch_size, 4, 4]; the first being identity, the rest are [[R; t], [0, 1]]
if config["data"]["read_what"]["with_X"]:
Xs = sample[
"X_cam2s"
] # list of [batch_size, 3, Ni]; only support batch_size=1 because of variable points Ni for each sample
# sift_kps, sift_deses = sample['sift_kps'], sample['sift_deses']
assert sample["get_flags"]["have_matches"][
0
].numpy(), "Did not find the corres files!"
matches_all, matches_good = sample["matches_all"], sample["matches_good"]
quality_all, quality_good = sample["quality_all"], sample["quality_good"]
delta_Rtijs_4_4 = scene_poses[
1
].float() # [batch_size, 4, 4], asserting we have 2 frames where scene_poses[0] are all identities
E_gts, F_gts = sample["E"], sample["F"]
pts1_virt_normalizedK, pts2_virt_normalizedK = (
sample["pts1_virt_normalized"].to(device),
sample["pts2_virt_normalized"].to(device),
)
pts1_virt_ori, pts2_virt_ori = (
sample["pts1_virt"].to(device),
sample["pts2_virt"].to(device),
)
# pts1_virt_ori, pts2_virt_ori = sample['pts1_velo'].to(device), sample['pts2_velo'].to(device)
# Get and Normalize points
if if_SP:
logging.info("use sp!!")
net_SP = net_dict["net_SP"]
SP_processer, SP_tracker = (
net_SP_helper["SP_processer"],
net_SP_helper["SP_tracker"],
)
xs, offsets, quality = get_matches_from_SP(
sample["imgs_grey"], net_SP, SP_processer, SP_tracker
)
matches_use = xs + offsets
# matches_use = xs + offsets
quality_use = quality
else:
# Get and Normalize points
matches_use = matches_good # [SWITCH!!!]
quality_use = quality_good.to(device) if if_quality else None # [SWITCH!!!]
## process x1, x2
matches_use = matches_use.to(device)
N_corres = matches_use.shape[1] # 1311 for matches_good, 2000 for matches_all
x1, x2 = (
matches_use[:, :, :2],
matches_use[:, :, 2:],
) # [batch_size, N, 2(W, H)]
x1_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x1).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
x2_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x2).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
matches_use_normalizedK = torch.cat((x1_normalizedK, x2_normalizedK), 2)
matches_use_ori = torch.cat((x1, x2), 2)
# Get image feats
if config["model"]["if_img_feat"]:
imgs = sample["imgs"] # [batch_size, H, W, 3]
imgs_stack = ((torch.cat(imgs, 3).float() - 127.5) / 127.5).permute(
0, 3, 1, 2
)
qs_scene = sample["q_scene"].to(device) # [B, 4, 1]
ts_scene = sample["t_scene"].to(device) # [B, 3, 1]
qs_cam = sample["q_cam"].to(device) # [B, 4, 1]
ts_cam = sample["t_cam"].to(device) # [B, 3, 1]
t_scene_scale = torch.norm(ts_scene, p=2, dim=1, keepdim=True)
# image_height, image_width = config['data']['image']['size'][0], config['data']['image']['size'][1]
# mask_x1 = (matches_use_ori[:, :, 0] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 0] < (image_width/8.*5.)).byte()
# mask_x2 = (matches_use_ori[:, :, 2] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 2] < (image_width/8.*5.)).byte()
# mask_y1 = (matches_use_ori[:, :, 1] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 1] < (image_height/8.*5.)).byte()
# mask_y2 = (matches_use_ori[:, :, 3] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 3] < (image_height/8.*5.)).byte()
# mask_center = (~(mask_x1 & mask_y1)) & (~(mask_x2 & mask_y2))
# matches_use_ori = (mask_center.float()).unsqueeze(-1) * matches_use_ori + torch.tensor([image_width/2., image_height/2., image_width/2., image_height/2.]).to(device).unsqueeze(0).unsqueeze(0) * (1- (mask_center.float()).unsqueeze(-1))
# x1, x2 = matches_use_ori[:, :, :2], matches_use_ori[:, :, 2:] # [batch_size, N, 2(W, H)]
data_batch = {
"matches_xy_ori": matches_use_ori,
"quality": quality_use,
"x1_normalizedK": x1_normalizedK,
"x2_normalizedK": x2_normalizedK,
"Ks": Ks,
"K_invs": K_invs,
"matches_good_unique_nums": sample["matches_good_unique_nums"],
"t_scene_scale": t_scene_scale,
}
# loss_params = {'model': config['model']['name'], 'clamp_at':config['model']['clamp_at'], 'depth': config['model']['depth']}
loss_params = {
"model": config["model"]["name"],
"clamp_at": config["model"]["clamp_at"],
"depth": config["model"]["depth"],
}
with torch.no_grad():
outs = net_dict["net_deepF"](data_batch)
pts1_eval, pts2_eval = pts1_virt_ori, pts2_virt_ori
# logits = outs['logits'] # [batch_size, N]
# logits_weights = F.softmax(logits, dim=1)
logits_weights = outs["weights"]
loss_E = 0.0
F_out, T1, T2, out_a = (
outs["F_est"],
outs["T1"],
outs["T2"],
outs["out_layers"],
)
pts1_eval = torch.bmm(T1, pts1_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
pts2_eval = torch.bmm(T2, pts2_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
# pts1_eval = utils_misc._homo(F.normalize(pts1_eval[:, :, :2], dim=2))
# pts2_eval = utils_misc._homo(F.normalize(pts2_eval[:, :, :2], dim=2))
loss_layers = []
losses_layers = []
# losses = utils_F.compute_epi_residual(pts1_eval, pts2_eval, F_est, loss_params['clamp_at']) #- res.mean()
# losses_layers.append(losses)
# loss_all = losses.mean()
# loss_layers.append(loss_all)
out_a.append(F_out)
loss_all = 0.0
for iter in range(loss_params["depth"]):
losses = utils_F.compute_epi_residual(
pts1_eval, pts2_eval, out_a[iter], loss_params["clamp_at"]
)
# losses = utils_F._YFX(pts1_eval, pts2_eval, out_a[iter], if_homo=True, clamp_at=loss_params['clamp_at'])
losses_layers.append(losses)
loss = losses.mean()
loss_layers.append(loss)
loss_all += loss
loss_all = loss_all / len(loss_layers)
F_ests = T2.permute(0, 2, 1).bmm(F_out.bmm(T1))
E_ests = Ks.transpose(1, 2) @ F_ests @ Ks
last_losses = losses_layers[-1].detach().cpu().numpy()
print(last_losses)
print(np.amax(last_losses, axis=1))
# E_ests_list = []
# for x1_single, x2_single, K, w in zip(x1, x2, Ks, logits_weights):
# E_est = utils_F._E_from_XY(x1_single, x2_single, K, torch.diag(w))
# E_ests_list.append(E_est)
# E_ests = torch.stack(E_ests_list).to(device)
# F_ests = utils_F._E_to_F(E_ests, Ks)
K_np = Ks.cpu().detach().numpy()
x1_np, x2_np = x1.cpu().detach().numpy(), x2.cpu().detach().numpy()
E_est_np = E_ests.cpu().detach().numpy()
F_est_np = F_ests.cpu().detach().numpy()
delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4.cpu().numpy()
# Tests and vis
idx = 0
img1 = imgs[0][idx].numpy().astype(np.uint8)
img2 = imgs[1][idx].numpy().astype(np.uint8)
img1_rgb, img2_rgb = img1, img2
img1_rgb_np, img2_rgb_np = img1, img2
im_shape = img1.shape
x1 = x1_np[idx]
x2 = x2_np[idx]
# utils_vis.draw_corr(img1, img2, x1, x2)
delta_Rtij = delta_Rtijs_4_4_cpu_np[idx]
print("----- delta_Rtij", delta_Rtij)
delta_Rtij_inv = np.linalg.inv(delta_Rtij)
K = K_np[idx]
F_gt_th = F_gts[idx].cpu()
F_gt = F_gt_th.numpy()
E_gt_th = E_gts[idx].cpu()
E_gt = E_gt_th.numpy()
F_est = F_est_np[idx]
E_est = E_est_np[idx]
unique_rows_all, unique_rows_all_idxes = np.unique(
np.hstack((x1, x2)), axis=0, return_index=True
)
angle_R = utils_geo.rot12_to_angle_error(np.eye(3), delta_Rtij_inv[:3, :3])
angle_t = utils_geo.vector_angle(
np.array([[0.0], [0.0], [1.0]]), delta_Rtij_inv[:3, 3:4]
)
print(
">>>>>>>>>>>>>>>> Between frames: The rotation angle (degree) %.4f, and translation angle (degree) %.4f"
% (angle_R, angle_t)
)
def plot_corrs(mask_sample, title="corres."):
utils_vis.draw_corr(
img1_rgb,
img2_rgb,
x1[mask_sample],
x2[mask_sample],
linewidth=2.0,
title=title,
)
pass
num = 100
        # use a separate boolean flag so the plot_corrs helper defined above is not shadowed
        if_plot_corrs = False
        if if_plot_corrs:
            plot_corrs(
                mask_sample=np.random.choice(x1.shape[0], num),
                title=f"Sample of {num} corres.",
            )
# ## Baseline: 8-points
# M_8point, error_Rt_8point, mask2_8point, E_est_8point = utils_opencv.recover_camera_opencv(K, x1, x2, delta_Rtij_inv, five_point=False, threshold=0.01)
## Baseline: 5-points
five_point = False
M_opencv, error_Rt_opencv, mask2, E_return = utils_opencv.recover_camera_opencv(
K, x1, x2, delta_Rtij_inv, five_point=five_point, threshold=0.01
)
if five_point:
E_est_opencv = E_return
F_est_opencv = utils_F.E_to_F_np(E_est_opencv, K)
else:
E_est_opencv, F_est_opencv = E_return[0], E_return[1]
#################
        print("plot out sift + ransac")  # "----- OpenCV %s (%d unique inliers)" % (opencv_name, unique_rows.shape[0])
mask_sample = mask2
utils_vis.show_epipolar_rui_gtEst(
x2[mask_sample, :],
x1[mask_sample, :],
img2_rgb,
img1_rgb,
F_gt.T,
F_est_opencv.T,
weights=None,
im_shape=im_shape,
title_append="",
if_show=False,
linewidth=1.5
)
###################
import matplotlib.pyplot as plt
savefile = f'{save_folder}/sift_ransac_{it}.png'
plt.axis("off")
plt.savefig(savefile, dpi=300, bbox_inches="tight")
plt.show()
logging.info(f"save image: {savefile}")
def eval_sample_sift(self, sample):
pass
@staticmethod
def ransac_from_points(sample, plot_data):
import dsac_tools.utils_opencv as utils_opencv # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
idx = 0
# matches_all, matches_good = sample["matches_all"], sample["matches_good"]
# x1, x2 = (
# matches_use[:, :, :2],
# matches_use[:, :, 2:],
# )
x1, x2 = plot_data['x1'], plot_data['x2']
toNumpy = lambda x: x.cpu().numpy()
x1, x2 = toNumpy(x1), toNumpy(x2)
# scene_poses = sample[
# "relative_scene_poses"
# ]
# delta_Rtijs_4_4 = scene_poses[
# 1
# ].float()
delta_Rtijs_4_4 = plot_data['delta_Rtijs_4_4']
delta_Rtij = delta_Rtijs_4_4.cpu().numpy()[idx]
print("----- delta_Rtij", delta_Rtij)
delta_Rtij_inv = np.linalg.inv(delta_Rtij)
# E_gts, F_gts = sample["E"], sample["F"]
# matches_use = matches_good.cpu().numpy()
five_point = False
Ks = plot_data["Ks"]
K = Ks.cpu().numpy()[idx]
print(f"K: {K}")
M_opencv, error_Rt_opencv, mask2, E_return = utils_opencv.recover_camera_opencv(
K, x1, x2, delta_Rtij_inv, five_point=five_point, threshold=0.01
)
E_est_opencv, F_est_opencv = E_return[0], E_return[1]
# return {"E_est": E_return, ""}
logging.info(f"E_ests: {E_est_opencv}, F_est_opencv: {F_est_opencv}" )
mask2 = np.array(mask2)[np.newaxis, np.newaxis, ...]
logits_weights = np.zeros_like(x1.T).reshape(-1,1)
print(f"mask2: {np.array(mask2)}")
# logits_weights[mask2] = 1
# logits_weights = logits_weights.reshape(1,1,-1)
# logits_weights[mask2] = 1
# print(f"mask2: {np.array(mask2).max()}, logits_weights: {logits_weights.shape}")
print(f"logits_weights: {np.array(logits_weights)}")
return {"E_ests": E_est_opencv, "F_ests": F_est_opencv, "logits_weights": mask2}
# ransac_from_points(sample)
def run_eval(
self, sample, mode="full", plot_corr_list=[""], plot_epipolar_list=[""],
prefix='', postfix='', save=True, title=True,
ransac=False, base_folder = "plots/vis_paper"
):
"""
plot_corr_list=["random", "mask_epi_dist_gt"]
plot_epipolar_list=["mask_conf", "mask_epi_dist_gt"]
"""
data = self.get_sample_data(sample)
self.base_folder = base_folder
Path(self.base_folder).mkdir(parents=True, exist_ok=True)
if not ransac:
outs = self.run_net(data["data_batch"], data["loss_params"])
else:
logging.info(f"running RANSAC")
outs = self.ransac_from_points(data["data_batch"], data["plot_data"])
# outs are included in plot_data
# plot_data = data['plot_data'].update(outs)
plot_data = data["plot_data"]
plot_data.update(outs)
# logging.info(f"plot_data: {plot_data}")
data = self.get_plot_data(plot_data, idx=0)
plot_data = data["plot_data"]
img_data = self.get_img_data(sample["imgs"])
idx = sample['frame_ids']
# savefile = f"{prefix}{idx[0][0]}_{idx[1][0]}{postfix}" if save else None
savefile = f"{prefix}{postfix}" if save else None
self.plot_helper(plot_data, img_data, plot_corr_list, plot_epipolar_list, savefile, title=title)
pass
def get_sample_data(self, sample):
import torch
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_opencv as utils_opencv # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_vis as utils_vis # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_misc as utils_misc # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_geo as utils_geo # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
from train_good_utils import val_rt, get_matches_from_SP
# params
config = self.config
net_dict = self.net_dict
if_SP = self.config["model"]["if_SP"]
if_quality = self.config["model"]["if_quality"]
device = self.device
net_SP_helper = self.net_SP_helper
task = "validating"
imgs = sample["imgs"] # [batch_size, H, W, 3]
Ks = sample["K"].to(device) # [batch_size, 3, 3]
K_invs = sample["K_inv"].to(device) # [batch_size, 3, 3]
batch_size = Ks.size(0)
scene_names = sample["scene_name"]
frame_ids = sample["frame_ids"]
scene_poses = sample[
"relative_scene_poses"
] # list of sequence_length tensors, which with size [batch_size, 4, 4]; the first being identity, the rest are [[R; t], [0, 1]]
if config["data"]["read_what"]["with_X"]:
Xs = sample[
"X_cam2s"
] # list of [batch_size, 3, Ni]; only support batch_size=1 because of variable points Ni for each sample
# sift_kps, sift_deses = sample['sift_kps'], sample['sift_deses']
assert sample["get_flags"]["have_matches"][
0
].numpy(), "Did not find the corres files!"
matches_all, matches_good = sample["matches_all"], sample["matches_good"]
quality_all, quality_good = sample["quality_all"], sample["quality_good"]
delta_Rtijs_4_4 = scene_poses[
1
].float() # [batch_size, 4, 4], asserting we have 2 frames where scene_poses[0] are all identities
E_gts, F_gts = sample["E"], sample["F"]
pts1_virt_normalizedK, pts2_virt_normalizedK = (
sample["pts1_virt_normalized"].to(device),
sample["pts2_virt_normalized"].to(device),
)
pts1_virt_ori, pts2_virt_ori = (
sample["pts1_virt"].to(device),
sample["pts2_virt"].to(device),
)
# pts1_virt_ori, pts2_virt_ori = sample['pts1_velo'].to(device), sample['pts2_velo'].to(device)
## print info
delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4.cpu().numpy()
delta_Rtij_inv = np.linalg.inv(delta_Rtijs_4_4_cpu_np[0])
angle_R = utils_geo.rot12_to_angle_error(np.eye(3), delta_Rtij_inv[:3, :3])
angle_t = utils_geo.vector_angle(
np.array([[0.0], [0.0], [1.0]]), delta_Rtij_inv[:3, 3:4]
)
print(
">>>>>>>>>>>>>>>> Between frames: The rotation angle (degree) %.4f, and translation angle (degree) %.4f"
% (angle_R, angle_t)
)
# Get and Normalize points
if if_SP:
net_SP = net_dict["net_SP"]
SP_processer, SP_tracker = (
net_SP_helper["SP_processer"],
net_SP_helper["SP_tracker"],
)
# {'xs': xs, 'offsets': offsets, 'quality': quality, 'num_matches': num_matches, 'xs_SP': xs_SP}
data = get_matches_from_SP(
sample["imgs_grey"], net_SP, SP_processer, SP_tracker
)
xs, offsets, quality = data["xs"], data["offsets"], data["quality"]
xs_SP = data["xs_SP"]
matches_use = xs + offsets
# matches_use = xs + offsets
quality_use = quality
else:
# Get and Normalize points
matches_use = matches_good # [SWITCH!!!]
quality_use = quality_good.to(device) if if_quality else None # [SWITCH!!!]
## process x1, x2
matches_use = matches_use.to(device)
N_corres = matches_use.shape[1] # 1311 for matches_good, 2000 for matches_all
x1, x2 = (
matches_use[:, :, :2],
matches_use[:, :, 2:],
) # [batch_size, N, 2(W, H)]
x1_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x1).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
x2_normalizedK = utils_misc._de_homo(
torch.matmul(
torch.inverse(Ks), utils_misc._homo(x2).transpose(1, 2)
).transpose(1, 2)
) # [batch_size, N, 2(W, H)], min/max_X=[-W/2/f, W/2/f]
matches_use_normalizedK = torch.cat((x1_normalizedK, x2_normalizedK), 2)
matches_use_ori = torch.cat((x1, x2), 2)
# Get image feats
if config["model"]["if_img_feat"]:
imgs = sample["imgs"] # [batch_size, H, W, 3]
imgs_stack = ((torch.cat(imgs, 3).float() - 127.5) / 127.5).permute(
0, 3, 1, 2
)
qs_scene = sample["q_scene"].to(device) # [B, 4, 1]
ts_scene = sample["t_scene"].to(device) # [B, 3, 1]
qs_cam = sample["q_cam"].to(device) # [B, 4, 1]
ts_cam = sample["t_cam"].to(device) # [B, 3, 1]
t_scene_scale = torch.norm(ts_scene, p=2, dim=1, keepdim=True)
# image_height, image_width = config['data']['image']['size'][0], config['data']['image']['size'][1]
# mask_x1 = (matches_use_ori[:, :, 0] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 0] < (image_width/8.*5.)).byte()
# mask_x2 = (matches_use_ori[:, :, 2] > (image_width/8.*3.)).byte() & (matches_use_ori[:, :, 2] < (image_width/8.*5.)).byte()
# mask_y1 = (matches_use_ori[:, :, 1] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 1] < (image_height/8.*5.)).byte()
# mask_y2 = (matches_use_ori[:, :, 3] > (image_height/8.*3.)).byte() & (matches_use_ori[:, :, 3] < (image_height/8.*5.)).byte()
# mask_center = (~(mask_x1 & mask_y1)) & (~(mask_x2 & mask_y2))
# matches_use_ori = (mask_center.float()).unsqueeze(-1) * matches_use_ori + torch.tensor([image_width/2., image_height/2., image_width/2., image_height/2.]).to(device).unsqueeze(0).unsqueeze(0) * (1- (mask_center.float()).unsqueeze(-1))
# x1, x2 = matches_use_ori[:, :, :2], matches_use_ori[:, :, 2:] # [batch_size, N, 2(W, H)]
data_batch = {
"matches_xy_ori": matches_use_ori,
"quality": quality_use,
"x1_normalizedK": x1_normalizedK,
"x2_normalizedK": x2_normalizedK,
"Ks": Ks,
"K_invs": K_invs,
"matches_good_unique_nums": sample["matches_good_unique_nums"],
"t_scene_scale": t_scene_scale,
"pts1_virt_ori": pts1_virt_ori,
"pts2_virt_ori": pts2_virt_ori,
}
# loss_params = {'model': config['model']['name'], 'clamp_at':config['model']['clamp_at'], 'depth': config['model']['depth']}
loss_params = {
"model": config["model"]["name"],
"clamp_at": config["model"]["clamp_at"],
"depth": config["model"]["depth"],
}
plot_data = {
"Ks": Ks,
"x1": x1,
"x2": x2,
"delta_Rtijs_4_4": delta_Rtijs_4_4,
"F_gts": F_gts,
"E_gts": E_gts,
}
if if_SP:
plot_data.update({"x1_SP": xs_SP[0], "x2_SP": xs_SP[1]})
return {
"data_batch": data_batch,
"loss_params": loss_params,
"plot_data": plot_data,
}
def get_img_data(self, imgs, idx=0):
# Tests and vis
idx = 0
img1 = imgs[0][idx].numpy().astype(np.uint8)
img2 = imgs[1][idx].numpy().astype(np.uint8)
img1_rgb, img2_rgb = img1, img2
return {"img1_rgb": img1_rgb, "img2_rgb": img2_rgb}
pass
def get_plot_data(self, plot_data, idx=0, if_print=False):
"""
input:
plot_data:
dict: {'Ks', 'x1', 'x2', 'E_ests', 'F_ests',
'F_gts', 'E_gts', 'delta_Rtijs_4_4'}
{'xs_SP': 'xs_SP'}
"""
assert idx <= 1 #####
# K_np = Ks.cpu().detach().numpy()
# x1_np, x2_np = x1.cpu().detach().numpy(), x2.cpu().detach().numpy()
# E_est_np = E_ests.cpu().detach().numpy()
# F_est_np = F_ests.cpu().detach().numpy()
# delta_Rtijs_4_4_cpu_np = delta_Rtijs_4_4.cpu().numpy()
## convert all items to numpy
plot_data_np = {}
plot_data_np_idx = {}
for i, en in enumerate(plot_data):
if not isinstance(plot_data[en], np.ndarray):
plot_data_np[en] = plot_data[en].cpu().detach().numpy()
else:
plot_data_np[en] = np.array(plot_data[en])
# # Tests and vis
# idx = 0
# img1 = imgs[0][idx].numpy().astype(np.uint8)
# img2 = imgs[1][idx].numpy().astype(np.uint8)
# img1_rgb, img2_rgb = img1, img2
# img1_rgb_np, img2_rgb_np = img1, img2
# im_shape = img1.shape
## name mapping
# {'plot_data_np': 'plot_data_np_idx'}
name_map = {
"Ks": "K",
"x1": "x1",
"x2": "x2",
"E_ests": "E_est",
"F_ests": "F_est",
"F_gts": "F_gt",
"E_gts": "E_gt",
"delta_Rtijs_4_4": "delta_Rtij",
}
for i, en in enumerate(plot_data_np):
name = name_map[en] if en in name_map else en
plot_data_np_idx[name] = plot_data_np[en][idx]
# x1 = x1_np[idx]
# x2 = x2_np[idx]
# delta_Rtij = delta_Rtijs_4_4_cpu_np[idx]
# K = K_np[idx]
# F_gt_th = F_gts[idx].cpu()
# F_gt = F_gt_th.numpy()
# E_gt_th = E_gts[idx].cpu()
# E_gt = E_gt_th.numpy()
# F_est = F_est_np[idx]
# E_est = E_est_np[idx]
delta_Rtij = plot_data_np_idx["delta_Rtij"]
if if_print:
print("----- delta_Rtij", delta_Rtij)
plot_data_np_idx["delta_Rtij_inv"] = np.linalg.inv(delta_Rtij)
return {"plot_data": plot_data_np_idx}
def get_val_rt(self, plot_data, idx=0, if_print=False):
from train_good_utils import val_rt
x1 = plot_data["x1"]
x2 = plot_data["x2"]
K = plot_data["K"]
E_gt = plot_data["E_gt"]
F_gt = plot_data["F_gt"]
E_est = plot_data["E_est"]
F_est = plot_data["F_est"]
delta_Rtij = plot_data["delta_Rtij"]
if if_print:
print(f"F_gt: {F_gt/F_gt[2, 2]}")
print(f"F_est: {F_est/F_est[2, 2]}")
result = val_rt(
idx,
K,
x1,
x2,
E_est,
E_gt,
F_est,
F_gt,
delta_Rtij,
five_point=False,
if_opencv=False,
)
error_Rt_estW, epi_dist_mean_estW, error_Rt_5point, epi_dist_mean_5point, error_Rt_gt, epi_dist_mean_gt = (
result[0],
result[1],
result[2],
result[3],
result[4],
result[5],
)
if if_print:
print(
"Recovered by ours (camera): The rotation error (degree) %.4f, and translation error (degree) %.4f"
% (error_Rt_estW[0], error_Rt_estW[1])
)
# print(epi_dist_mean_est_ours, np.mean(epi_dist_mean_est_ours))
epi_dist_mean_est_ours = epi_dist_mean_estW
print(
"%.2f, %.2f"
% (
np.sum(epi_dist_mean_est_ours < 0.1)
/ epi_dist_mean_est_ours.shape[0],
np.sum(epi_dist_mean_est_ours < 1)
/ epi_dist_mean_est_ours.shape[0],
)
)
return {
"error_Rt_est_ours": error_Rt_estW,
"epi_dist_mean_est_ours": epi_dist_mean_estW,
"epi_dist_mean_gt": epi_dist_mean_gt,
# 'M_estW': M_estW,
}
def run_net(self, data_batch, loss_params, if_print=False):
import torch
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_opencv as utils_opencv # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_vis as utils_vis # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_misc as utils_misc # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import dsac_tools.utils_geo as utils_geo # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
from train_good_utils import val_rt, get_matches_from_SP
net_dict = self.net_dict
Ks = data_batch["Ks"]
with torch.no_grad():
outs = net_dict["net_deepF"](data_batch)
pts1_virt_ori, pts2_virt_ori = (
data_batch["pts1_virt_ori"],
data_batch["pts2_virt_ori"],
)
pts1_eval, pts2_eval = pts1_virt_ori, pts2_virt_ori
# logits = outs['logits'] # [batch_size, N]
# logits_weights = F.softmax(logits, dim=1)
logits_weights = outs["weights"]
loss_E = 0.0
F_out, T1, T2, out_a = (
outs["F_est"],
outs["T1"],
outs["T2"],
outs["out_layers"],
)
pts1_eval = torch.bmm(T1, pts1_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
pts2_eval = torch.bmm(T2, pts2_virt_ori.permute(0, 2, 1)).permute(0, 2, 1)
# pts1_eval = utils_misc._homo(F.normalize(pts1_eval[:, :, :2], dim=2))
# pts2_eval = utils_misc._homo(F.normalize(pts2_eval[:, :, :2], dim=2))
loss_layers = []
losses_layers = []
# losses = utils_F.compute_epi_residual(pts1_eval, pts2_eval, F_est, loss_params['clamp_at']) #- res.mean()
# losses_layers.append(losses)
# loss_all = losses.mean()
# loss_layers.append(loss_all)
out_a.append(F_out)
loss_all = 0.0
for iter in range(loss_params["depth"]):
losses = utils_F.compute_epi_residual(
pts1_eval, pts2_eval, out_a[iter], loss_params["clamp_at"]
)
# losses = utils_F._YFX(pts1_eval, pts2_eval, out_a[iter], if_homo=True, clamp_at=loss_params['clamp_at'])
losses_layers.append(losses)
loss = losses.mean()
loss_layers.append(loss)
loss_all += loss
loss_all = loss_all / len(loss_layers)
F_ests = T2.permute(0, 2, 1).bmm(F_out.bmm(T1))
E_ests = Ks.transpose(1, 2) @ F_ests @ Ks
last_losses = losses_layers[-1].detach().cpu().numpy()
if if_print:
print(last_losses)
print(np.amax(last_losses, axis=1))
print(f"logits_weights: {logits_weights.shape}")
return {"E_ests": E_ests, "F_ests": F_ests, "logits_weights": logits_weights}
def plot_helper(
self,
plot_data,
img_data,
plot_corr_list=["random", "mask_epi_dist_gt"],
plot_epipolar_list=["mask_conf", "mask_epi_dist_gt"],
savefile=None,
title=True
):
import dsac_tools.utils_vis as utils_vis # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
import matplotlib.pyplot as plt
"""
input:
plot_data:
dict: {'K', 'x1', 'x2', 'E_est', 'F_est',
'F_gt', 'E_gt', 'delta_Rtij',
'logits_weights', }
img_data:
{'img1_rgb', 'img2_rgb'}
"""
# plot epipolar lines.
# plot correspondences
img1_rgb = img_data["img1_rgb"]
img2_rgb = img_data["img2_rgb"]
x1 = plot_data["x1"]
x2 = plot_data["x2"]
if_SP = self.config["model"]["if_SP"]
if if_SP:
x1_SP = plot_data["x1_SP"]
x2_SP = plot_data["x2_SP"]
K = plot_data["K"]
E_gt = plot_data["E_gt"]
F_gt = plot_data["F_gt"]
E_est = plot_data["E_est"]
F_est = plot_data["F_est"]
scores_ori = plot_data["logits_weights"].flatten()
# logging.info(f"scores_ori: {scores_ori.shape}")
im_shape = img1_rgb.shape
unique_rows_all, unique_rows_all_idxes = np.unique(
np.hstack((x1, x2)), axis=0, return_index=True
)
def get_score_mask(scores_ori, unique_rows_all_idxes, num_corr=100, top=True):
# num_corr = num_corr if top else -num_corr
sort_idxes = np.argsort(scores_ori[unique_rows_all_idxes])[::-1]
scores = scores_ori[unique_rows_all_idxes][sort_idxes]
# num_corr = 100
mask_conf = sort_idxes[:num_corr] if top else sort_idxes[-num_corr:]
logging.info(f" top 10: {scores[:10]}, last 10: {scores[-10:]}")
return mask_conf
def plot_corrs(
unique_rows_all_idxes=None,
mask_sample=None,
title="corres.",
savefile="test.png",
axis_off=True
):
def plot_scatter_xy(x1,img1_rgb,color='r',new_figure=False,zorder=2):
unique_rows_all, unique_rows_all_idxes = np.unique(
x1, axis=0, return_index=True
)
utils_vis.scatter_xy(
unique_rows_all,
color,
img1_rgb.shape,
title="",
new_figure=new_figure,
s=100, # 100
set_lim=False,
if_show=False,
cmap=None,
zorder=zorder
)
pass
assert unique_rows_all_idxes is not None or mask_sample is not None
# if unique_rows_all_idxes is None:
# x1 = x1[mask_sample]
# x2 = x2[mask_sample]
# else:
# x1 = x1[unique_rows_all_idxes][mask_conf, :]
# x2 = x2[unique_rows_all_idxes][mask_conf, :]
new_figure_corr = True
if_corr = True
color_t = 'g' # 'C0'
if if_corr:
utils_vis.draw_corr(
img1_rgb,
img2_rgb,
# x1 = x1[mask_sample] if unique_rows_all_idxes is None else x1[unique_rows_all_idxes][mask_conf, :],
# x2 = x2[mask_sample] if unique_rows_all_idxes is None else x2[unique_rows_all_idxes][mask_conf, :],
x1=x1[mask_sample]
if unique_rows_all_idxes is None
else x1[unique_rows_all_idxes],
x2=x2[mask_sample]
if unique_rows_all_idxes is None
else x2[unique_rows_all_idxes],
# x1[mask_sample],
# x2[mask_sample],
color=color_t,
new_figure=new_figure_corr,
linewidth=2,
title=title,
if_show=False,
zorder=1
)
            if_SP = False; logging.warning("disable if_SP")
if if_SP:
def remove_x_from_y(x1, x1_SP, remove=True):
if remove:
x1_noCorr = x1_SP[(np.isin(x1_SP[:,0], x1[:,0]) * np.isin(x1_SP[:,1], x1[:,1]) ) == False]
else:
x1_noCorr = x1
return x1_noCorr
# x1_noCorr = x1_SP[(np.isin(x1_SP[:,0], x1[:,0]) * np.isin(x1_SP[:,1], x1[:,1]) ) == False]
x1_noCorr = remove_x_from_y(x1, x1_SP)
print(f"np.isin(x1_SP[:,0], x1[:,0]): {x1_SP[:5]}, {x1[:5]}")
color = 'r'
plot_scatter_xy(x1_noCorr,img1_rgb,color=color,new_figure=False,zorder=2)
x2_noCorr = remove_x_from_y(x2, x2_SP)
# x2_noCorr = x1_SP[np.isin(x2_SP[:,0], x2[:,0]) * np.isin(x2_SP[:,1], x2[:,1]) ]
x2_noCorr[:, 0] += img1_rgb.shape[1]
plot_scatter_xy(x2_noCorr,img1_rgb,color=color,new_figure=False,zorder=2)
# new_figure_corr = False
# x2[:, 0] += img1_rgb.shape[1]
x2_shift = x2 + 0
x2_shift[:, 0] += img1_rgb.shape[1]
plot_scatter_xy(x1,img1_rgb,color=color_t,new_figure=False,zorder=2)
plot_scatter_xy(x2_shift,img1_rgb,color=color_t,new_figure=False,zorder=2)
if axis_off:
plt.axis('off')
if savefile is not None:
plt.savefig(savefile, dpi=300, bbox_inches="tight")
logging.info(f"save image: {savefile}")
plt.show()
def plot_epipolar(unique_rows_all_idxes, mask_conf, title="", savefile=None, axis_off=True):
# if mask_conf is None:
# mask_conf = np.ones_like(unique_rows_all_idxes)
# utils_vis.show_epipolar_rui_gtEst(
# x2[unique_rows_all_idxes][:],
# x1[unique_rows_all_idxes][:],
# img2_rgb,
# img1_rgb,
# F_gt.T,
# F_est.T,
# # weights=scores_ori[unique_rows_all_idxes],
# weights=None,
# im_shape=im_shape,
# title_append=title,
# if_show=False,
# linewidth=1.5
# )
utils_vis.show_epipolar_rui_gtEst(
x2[unique_rows_all_idxes][mask_conf, :],
x1[unique_rows_all_idxes][mask_conf, :],
img2_rgb,
img1_rgb,
F_gt.T,
F_est.T,
weights=scores_ori[unique_rows_all_idxes][mask_conf],
im_shape=im_shape,
title_append=title,
if_show=False,
linewidth=1.5
)
if axis_off:
plt.axis('off')
if savefile is not None:
plt.savefig(savefile, dpi=300, bbox_inches="tight")
logging.info(f"save image: {savefile}")
plt.show()
# base_folder = "plots/vis_paper"
base_folder = self.base_folder
if "all" in plot_corr_list:
# for i, xs in enumerate(x1_SP):
# print(f"x1_SP: {x1_SP.shape}")
file = None
if savefile is not None:
file = f"{base_folder}/corr_all_{savefile}.png"
logging.info(f"save image: {savefile}")
unique_rows_all, unique_rows_all_idxes = np.unique(
x1, axis=0, return_index=True
)
plot_corrs(
unique_rows_all_idxes=unique_rows_all_idxes,
mask_sample=None,
title=f"Sample of {unique_rows_all_idxes.shape[0]} corres." if title else "",
savefile=file,
)
if "random" in plot_corr_list:
# num = 100
file = None
if savefile is not None:
file = f"plots/corr_all_random_{savefile}.png"
logging.info(f"save image: {savefile}")
unique_rows_all, unique_rows_all_idxes = np.unique(
x1, axis=0, return_index=True
)
# logging.info(f"unique_rows_all: {unique_rows_all}, unique_rows_all: {unique_rows_all}")
percentage = 0.3
num = int(unique_rows_all.shape[0]*percentage)
logging.info(f"sample {percentage} of corrs. num: {num}")
plot_corrs(
mask_sample=unique_rows_all_idxes[np.random.choice(unique_rows_all_idxes.shape[0], num)],
title=f"Sample of {num} corres." if title else "",
savefile=file,
)
if "mask_epi_dist_gt" in plot_corr_list:
num_points = 100
data = self.get_val_rt(plot_data)
mask_conf = get_score_mask(
data["epi_dist_mean_gt"].flatten(),
unique_rows_all_idxes,
num_corr=num_points,
top=False,
)
plot_corrs(
unique_rows_all_idxes,
mask_conf,
title=f"Top {mask_conf.shape[0]} correspondences with lowest epipolar distance" if title else "",
)
num_points = 80
if "mask_conf" in plot_epipolar_list:
file = None
if savefile is not None:
file = f"{base_folder}/mask_conf_{savefile}.png"
logging.info(f"save image: {savefile}")
print(f"scores_ori: {scores_ori.shape}, {scores_ori[0]}")
mask_conf = get_score_mask(scores_ori, unique_rows_all_idxes, num_corr=num_points)
print(f"mask_conf: {mask_conf}")
## sift version
# print(f"x1: {x1.shape}, x2: {x2.shape}")
# plot_epipolar(
# scores_ori,
# None,
# title=f"Ours top {mask_conf.shape[0]} with largest score points" if title else "",
# savefile=file
# )
# original
plot_epipolar(
unique_rows_all_idxes,
mask_conf,
title=f"Ours top {mask_conf.shape[0]} with largest score points" if title else "",
savefile=file
)
if "mask_epi_dist_gt" in plot_epipolar_list:
data = self.get_val_rt(plot_data, if_print=True)
# logging.info(f"data['epi_dist_mean_gt']: {data['epi_dist_mean_gt'].shape}")
file = None
if savefile is not None:
file = f"{base_folder}/epi_dist_all_{savefile}.png"
logging.info(f"save image: {savefile}")
mask_conf = get_score_mask(
data["epi_dist_mean_gt"].flatten(),
unique_rows_all_idxes,
num_corr=num_points,
top=False,
)
plot_epipolar(
unique_rows_all_idxes,
mask_conf,
title=f"Top {mask_conf.shape[0]} points with lowest epipolar distance" if title else "",
savefile=file
)
## plot points selected from gt or deepF.
pass
# data = eval_one_sample(config, sample)
if __name__ == "__main__":
import logging
import argparse
import yaml
from settings import EXPER_PATH
logging.basicConfig(
format="[%(asctime)s %(levelname)s] %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.DEBUG,
)
# add parser
parser = argparse.ArgumentParser(description="Foo")
# Training command
parser.add_argument("config", type=str)
parser.add_argument("exper_name", type=str)
args = parser.parse_args(
"configs/table_trans_rot_kitti_apollo.yaml table_rot_test".split()
)
print(args)
## load configs
with open(args.config, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
output_dir = os.path.join(EXPER_PATH, args.exper_name)
print(output_dir)
print(f"config: {config}")
## read results
from utils.eval_tools import Exp_table_processor
table_processor = Exp_table_processor(config, debug=True)
table_processor.get_result_dict()
## print out table
table_processor.print_tables(if_name=True, table_list=["table_1"])
|
the-stack_106_13850
|
"""elbb.server"""
import json
import asyncio
import threading
from elbb.meta import BANNER
from elbb.playbooks import manifest
from elbb.engine import launch_client
from elbb.queue import get_queue, clear_queue
from sanic import Sanic, response
from sanic.websocket import WebSocketProtocol
app = Sanic(name='elbb')
async def _consumer_handler(ws):
while True:
data = await ws.recv()
data = json.loads(data)
target_func = None
command = data['command']
args = data['args']
# get target function from playbook manifest
if command in manifest:
target_func = manifest[command]
if target_func:
t = threading.Thread(target=target_func, args=[*args.values()])
t.daemon = True
t.start()
async def _producer_handler(ws):
queue = get_queue()
clear_queue()
if queue:
await ws.send(queue)
await asyncio.sleep(.1)
@app.websocket('/elbb_connect')
async def elbb_connect(request, ws):
# launch game client (if it's not already running)
launch_client()
while True:
consumer_task = asyncio.ensure_future(_consumer_handler(ws))
producer_task = asyncio.ensure_future(_producer_handler(ws))
done, pending = await asyncio.wait(
[consumer_task, producer_task],
return_when=asyncio.FIRST_COMPLETED,
)
for task in pending:
task.cancel()
def start_server():
app.run('0.0.0.0', port=51337, protocol=WebSocketProtocol)
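# Example payload handled by _consumer_handler (illustrative sketch only; the
# available command names come from elbb.playbooks.manifest, which is not shown
# here, so "example_playbook" below is hypothetical):
#
#   {"command": "example_playbook", "args": {"target": "some-value"}}
#
# The matching playbook function runs in a daemon thread, and anything it pushes
# onto the queue is streamed back to the websocket client by _producer_handler.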
|
the-stack_106_13856
|
# encoding: utf-8
import multiprocessing
import sys
from PyQt5.QtCore import QObject, QTimer, QUrl, pyqtSignal, pyqtSlot, Qt
from PyQt5.QtGui import QFont, QGuiApplication
from PyQt5.QtWidgets import (
QApplication, QLabel, QWidget, QHBoxLayout, QVBoxLayout, QDesktopWidget
)
#from PyQt5.QtQml import QQmlComponent, QQmlEngine, QQmlApplicationEngine
from data import DataSource
from data_ser import DataSer
class Info:
update_ms = 16
font = QFont('SansSerif', 50)
title_font = QFont('SansSerif', 60)
window_sheet = 'background-color: #e0e0e0;'
label_sheet = 'QLabel {color: #bb0000;}'
title = "RISLAB Telemetry"
def __init__(self):
self.data_ser = DataSer()
self.process = multiprocessing.Process(target=self.ros_process)
self.process.start()
self.title_font.setBold(True)
self.labels = []
self.w = QWidget()
self.w.resize(1280, 720)
self.w.setWindowTitle(self.title)
self.w.setStyleSheet(self.window_sheet)
#self.engine = QQmlApplicationEngine()
#self.context = self.engine.rootContext()
#self.engine.load('telem.qml')
#self.comp = QQmlComponent(self.engine)
#self.comp.loadUrl(QUrl('telem.qml'))
#level = self.comp.create()
#if level is not None:
# self.engine.show()
#else:
# for err in self.comp.errors():
# print(err.toString())
# sys.exit(1)
main_vbox = QVBoxLayout()
self.w.setLayout(main_vbox)
hbox1 = QHBoxLayout()
main_vbox.addLayout(hbox1)
cols = [
("Pos Err", "pos", 3),
("Vel Err", "vel", 3),
("Accel Dist", "accel_dist", 3),
("Torque Dist", "torque_dist", 3)
]
self.updates = []
for col_name, col_id, n in cols:
vbox = QVBoxLayout()
hbox1.addLayout(vbox)
title_label = QLabel(col_name)
title_label.setFont(self.title_font)
title_label.setStyleSheet(self.label_sheet)
vbox.addWidget(title_label)
for i in range(n):
label = QLabel("", self.w)
label.setStyleSheet(self.label_sheet)
label.setAlignment(Qt.AlignRight)
self.updates.append((label, "", (col_id, i), "%0.3f"))
label.setFont(self.font)
vbox.addWidget(label)
self.labels.append(label)
hbox1.addSpacing(70)
hbox3 = QHBoxLayout()
main_vbox.addLayout(hbox3)
title_label = QLabel("Yaw Err: ")
title_label.setFont(self.title_font)
title_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
title_label.setStyleSheet(self.label_sheet)
hbox3.addWidget(title_label)
label = QLabel("", self.w)
label.setStyleSheet(self.label_sheet)
label.setAlignment(Qt.AlignVCenter)
label.setFont(self.font)
self.updates.append((label, "", ("yawerr", 0), "%0.3f°"))
hbox3.addWidget(label)
hbox2 = QHBoxLayout()
main_vbox.addLayout(hbox2)
cols = [
("Euler (RPY)", "euler", 3),
("RPM", "rpms", 4),
]
for col_name, col_id, n in cols:
vbox = QVBoxLayout()
hbox2.addLayout(vbox)
title_label = QLabel(col_name)
title_label.setAlignment(Qt.AlignCenter)
title_label.setFont(self.title_font)
title_label.setStyleSheet(self.label_sheet)
vbox.addWidget(title_label)
fmt_s = "%0.3f°" if col_id == "euler" else "%05d"
for i in range(n):
label = QLabel("", self.w)
label.setAlignment(Qt.AlignRight)
label.setStyleSheet(self.label_sheet)
self.updates.append((label, "", (col_id, i), fmt_s))
label.setFont(self.font)
vbox.addWidget(label)
self.labels.append(label)
hbox2.addSpacing(70)
vbox_imu = QVBoxLayout()
hbox2.addLayout(vbox_imu)
vbox_imu.addSpacing(70)
label = QLabel("")
label.setAlignment(Qt.AlignRight)
label.setFont(self.font)
label.setStyleSheet(self.label_sheet)
self.updates.append((label, "Voltage:", ("voltage", 0), "%2.2f V"))
vbox_imu.addWidget(label)
label = QLabel("")
label.setAlignment(Qt.AlignRight)
label.setFont(self.font)
label.setStyleSheet(self.label_sheet)
self.updates.append((label, "Current:", ("current", 0), "%2.2f A"))
vbox_imu.addWidget(label)
label = QLabel("")
label.setAlignment(Qt.AlignRight)
label.setFont(self.font)
label.setStyleSheet(self.label_sheet)
self.updates.append((label, "Temp:", ("temp", 0), "%2.2f °C"))
vbox_imu.addWidget(label)
hbox2.addSpacing(70)
qr = self.w.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.w.move(qr.topLeft())
self.w.show()
self.update_timer = QTimer(self.w)
self.update_timer.setSingleShot(False)
self.update_timer.timeout.connect(self.update)
self.update_timer.start(self.update_ms)
self.time_ms = 0
def ros_process(self):
data = DataSource(self.data_ser)
def update(self):
data = self.data_ser.read()
self.time_ms += self.update_ms
for label, prefix, label_ids, fmt_s in self.updates:
label.setText(("%s " + fmt_s) % (prefix, getattr(data, label_ids[0])[label_ids[1]]))
if __name__ == "__main__":
app = QApplication(sys.argv)
info = Info()
app.exec_()
|
the-stack_106_13861
|
import pathlib
import subprocess
import pytest
from pytest_notebook import execution, notebook
THIS_DIR = pathlib.Path(__file__).absolute().parent
EXAMPLES_DIR = THIS_DIR / ".." / "examples"
SH_PATHS = EXAMPLES_DIR.glob("*.sh")
NB_PATHS = EXAMPLES_DIR.glob("*.ipynb")
PY_PATHS = EXAMPLES_DIR.glob("*.py")
@pytest.mark.parametrize("nb_path", NB_PATHS)
def test_run_example_notebooks(nb_path):
"""Smoke test ensuring that example notebooks run without error.
The `pytest_notebook` package also includes regression test functionality against
saved notebook outputs, if we want to check that later.
"""
nb = notebook.load_notebook(nb_path)
execution.execute_notebook(nb, cwd=EXAMPLES_DIR, timeout=120)
@pytest.mark.parametrize("py_path", PY_PATHS)
def test_run_example_py_scripts(py_path):
"""Smoke test ensuring that python example scripts run without error."""
exit_code = subprocess.call(["python", py_path])
assert exit_code == 0
@pytest.mark.parametrize("sh_path", SH_PATHS)
def test_run_example_sh_scripts(sh_path):
"""Smoke test ensuring that shell example scripts run without error."""
exit_code = subprocess.call(["env", "bash", sh_path])
assert exit_code == 0
README_SNIPPET_PATHS = [EXAMPLES_DIR / "quickstart.sh"]
@pytest.mark.parametrize("snippet_path", README_SNIPPET_PATHS)
def test_example_snippets_are_in_readme(snippet_path):
"""Check that README.md examples haven't diverged from snippets."""
with open(snippet_path, "r") as f:
x = f.read()
with open("README.md") as f:
y = f.read()
assert x in y, f"{snippet_path} has diverged from README.md"
|
the-stack_106_13862
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.diagpart_run import diagpart_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_diagpart_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
## testflag,opfuncname,testRunArgs, dimArgs
("diagpart_001", diagpart_run, ((21, 21), "float32", "cce_diagpart_fp32"), ((1, 1),)),
("diagpart_002", diagpart_run, ((43, 43), "float16", "cce_diagpart_fp16"), ((1, 1),)),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
|
the-stack_106_13863
|
import math
# Distance between two points
def dist(p, q):
return math.hypot(p[0]-q[0], p[1] - q[1])
# Square distance between two points
def d2(p, q):
return (p[0] - q[0])**2 + (p[1] - q[1])**2
# Converts two points to a line (a, b, c),
# ax + by + c = 0
# if p == q, a = b = c = 0
def pts2line(p, q):
return (-q[1] + p[1],
q[0] - p[0],
p[0]*q[1] - p[1]*q[0])
# Distance from a point to a line,
# given that a != 0 or b != 0
def distl(l, p):
return (abs(l[0]*p[0] + l[1]*p[1] + l[2])
/math.hypot(l[0], l[1]))
# intersects two lines.
# if parallell, returnes False.
def inters(l1, l2):
a1,b1,c1 = l1
a2,b2,c2 = l2
cp = a1*b2 - a2*b1
if cp != 0:
return float(b1*c2 - b2*c1)/cp, float(a2*c1 - a1*c2)/cp
else:
return False
# projects a point on a line
def project(l, p):
a, b, c = l
return ((b*(b*p[0] - a*p[1]) - a*c)/(a*a + b*b),
(a*(a*p[1] - b*p[0]) - b*c)/(a*a + b*b))
# Intersections between circles
def intersections(c1, c2):
if c1[2] > c2[2]:
c1, c2 = c2, c1
x1, y1, r1 = c1
x2, y2, r2 = c2
if x1 == x2 and y1 == y2 and r1 == r2:
return False
dist2 = (x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2)
rsq = (r1 + r2)*(r1 + r2)
if dist2 > rsq or dist2 < (r1-r2)*(r1-r2):
return []
elif dist2 == rsq:
cx = x1 + (x2-x1)*r1/(r1+r2)
cy = y1 + (y2-y1)*r1/(r1+r2)
return [(cx, cy)]
elif dist2 == (r1-r2)*(r1-r2):
cx = x1 - (x2-x1)*r1/(r2-r1)
cy = y1 - (y2-y1)*r1/(r2-r1)
return [(cx, cy)]
d = math.sqrt(dist2)
f = (r1*r1 - r2*r2 + dist2)/(2*dist2)
xf = x1 + f*(x2-x1)
yf = y1 + f*(y2-y1)
dx = xf-x1
dy = yf-y1
h = math.sqrt(r1*r1 - dx*dx - dy*dy)
norm = abs(math.hypot(dx, dy))
p1 = (xf + h*(-dy)/norm, yf + h*(dx)/norm)
p2 = (xf + h*(dy)/norm, yf + h*(-dx)/norm)
return sorted([p1, p2])
# Finds the bisector through origo
# between two points by normalizing.
def bisector(p1, p2):
    d1 = math.hypot(p1[0], p1[1])
d2 = math.hypot(p2[0], p2[1])
return ((p1[0]/d1 + p2[0]/d2),
(p1[1]/d1 + p2[1]/d2))
# Distance from P to origo
def norm(P):
return (P[0]**2 + P[1]**2 + P[2]**2)**(0.5)
# Finds ditance between point p
# and line A + t*u in 3D
def dist3D(A, u, p):
AP = tuple(A[i] - p[i] for i in range(3))
cross = tuple(AP[i]*u[(i+1)%3] - AP[(i+1)%3]*u[i]
for i in range(3))
return norm(cross)/norm(u)
def vec(p1, p2):
return p2[0]-p1[0], p2[1] - p1[1]
def sign(x):
if x < 0: return -1
return 1 if x > 0 else 0
def cross(u, v):
return u[0] * v[1] - u[1] * v[0]
# s1: (Point, Point)
# s2: (Point, Point)
# Point : (x, y)
# returns true if intersecting s1 & s2 shares at least 1 point.
def segment_intersect(s1, s2):
u = vec(*s1)
v = vec(*s2)
p1, p2 = s1
q1, q2 = s2
d1 = cross(u, vec(p1, q1))
d2 = cross(u, vec(p1, q2))
d3 = cross(v, vec(q1, p1))
d4 = cross(v, vec(q1, p2))
if d1 * d2 * d3 * d4 == 0:
return True
return sign(d1) != sign(d2) and sign(d3) != sign(d4)
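# Minimal sanity checks (illustrative addition, not part of the original module).
if __name__ == "__main__":
    # Two unit circles centred one unit apart meet in two points.
    print(intersections((0, 0, 1), (1, 0, 1)))
    # The diagonals of the unit square cross, so this prints True.
    print(segment_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0))))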
|
the-stack_106_13865
|
import logging
import os
import sys
# Set up path so that tests can find SUT package
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Set up standard logging config
_logger = logging.getLogger()
print()
if _logger is None:
    logging.basicConfig()
    _logger = logging.getLogger()
    print("Using new logger")
else:
    print("Using existing logger")
_logger.setLevel(logging.DEBUG)
_logging_handler = logging.StreamHandler()
_logging_handler.setFormatter(logging.Formatter("%(levelname)-10s %(message)s"))
_logger.addHandler(_logging_handler)
|
the-stack_106_13867
|
"""Implementation of packaging-related magic functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2018 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import re
import shlex
import sys
from IPython.core.magic import Magics, magics_class, line_magic
def _is_conda_environment():
"""Return True if the current Python executable is in a conda env"""
# TODO: does this need to change on windows?
conda_history = os.path.join(sys.prefix, 'conda-meta', 'history')
return os.path.exists(conda_history)
def _get_conda_executable():
"""Find the path to the conda executable"""
# Check if there is a conda executable in the same directory as the Python executable.
# This is the case within conda's root environment.
conda = os.path.join(os.path.dirname(sys.executable), 'conda')
if os.path.isfile(conda):
return conda
# Otherwise, attempt to extract the executable from conda history.
# This applies in any conda environment.
R = re.compile(r"^#\s*cmd:\s*(?P<command>.*conda)\s[create|install]")
with open(os.path.join(sys.prefix, 'conda-meta', 'history')) as f:
for line in f:
match = R.match(line)
if match:
return match.groupdict()['command']
# Fallback: assume conda is available on the system path.
return "conda"
CONDA_COMMANDS_REQUIRING_PREFIX = {
'install', 'list', 'remove', 'uninstall', 'update', 'upgrade',
}
CONDA_COMMANDS_REQUIRING_YES = {
'install', 'remove', 'uninstall', 'update', 'upgrade',
}
CONDA_ENV_FLAGS = {'-p', '--prefix', '-n', '--name'}
CONDA_YES_FLAGS = {'-y', '--y'}
@magics_class
class PackagingMagics(Magics):
"""Magics related to packaging & installation"""
@line_magic
def pip(self, line):
"""Run the pip package manager within the current kernel.
Usage:
%pip install [pkgs]
"""
python = sys.executable
if sys.platform == "win32":
python = '"' + python + '"'
else:
python = shlex.quote(python)
self.shell.system(" ".join([python, "-m", "pip", line]))
print("Note: you may need to restart the kernel to use updated packages.")
@line_magic
def conda(self, line):
"""Run the conda package manager within the current kernel.
Usage:
%conda install [pkgs]
"""
if not _is_conda_environment():
raise ValueError("The python kernel does not appear to be a conda environment. "
"Please use ``%pip install`` instead.")
conda = _get_conda_executable()
args = shlex.split(line)
command = args[0]
args = args[1:]
extra_args = []
# When the subprocess does not allow us to respond "yes" during the installation,
# we need to insert --yes in the argument list for some commands
stdin_disabled = getattr(self.shell, 'kernel', None) is not None
needs_yes = command in CONDA_COMMANDS_REQUIRING_YES
has_yes = set(args).intersection(CONDA_YES_FLAGS)
if stdin_disabled and needs_yes and not has_yes:
extra_args.append("--yes")
# Add --prefix to point conda installation to the current environment
needs_prefix = command in CONDA_COMMANDS_REQUIRING_PREFIX
has_prefix = set(args).intersection(CONDA_ENV_FLAGS)
if needs_prefix and not has_prefix:
extra_args.extend(["--prefix", sys.prefix])
self.shell.system(' '.join([conda, command] + extra_args + args))
print("\nNote: you may need to restart the kernel to use updated packages.")
|
the-stack_106_13868
|
import copy
import sys
import uuid
from collections import namedtuple, OrderedDict, Hashable
from six import with_metaclass
from .exception import (PypherException, PypherAliasException,
PypherArgumentException)
from .partial import Partial
CHECK_CUSTOM_CLASHES = True
_LINKS = {}
_MODULE = sys.modules[__name__]
_PREDEFINED_STATEMENTS = [['Match',], ['Create',], ['Merge',], ['Delete',],
['Remove',], ['Drop',], ['Where',], ['OrderBy', 'ORDER BY'],
['Set',], ['Skip',], ['Limit',], ['Return',], ['Unwind',], ['ASSERT'],
['Detach'], ['DetachDelete', 'DETACH DELETE'], ['Foreach'], ['Load'],
['CSV'], ['FROM'], ['Headers'], ['LoadCsvFrom', 'LOAD CSV FROM'],
['LoadCSVWithHeadersFrom', 'LOAD CSV WITH HEADERS FROM'], ['WITH'],
['UsingPeriodIcCommit', 'USING PERIODIC COMMIT'], ['Periodic'], ['Commit'],
['FieldTerminator', 'FIELDTERMINATOR'], ['Optional', 'OPTIONAL'],
['OptionalMatch', 'OPTIONAL MATCH'], ['Desc'], ['When'], ['ELSE'],
['Case'], ['End'], ['OnCreateSet', 'ON CREATE SET'],
['OnMatchSet', 'ON MATCH SET'], ['CreateIndexOn', 'CREATE INDEX ON'],
['UsingIndex', 'USING INDEX'], ['DropIndexOn', 'DROP INDEX ON'],
['CreateConstraintOn', 'CREATE CONSTRAINT ON'], ['OnCreate', 'ON CREATE'],
['DropConstraintOn', 'DROP CONSTRAINT ON'], ['WHEN'], ['THEN'], ['NOT'],
['XOR'], ['NULL'], ['IS_NULL', 'IS NULL'], ['IS_NOT_NULL', 'IS NOT NULL'],
['OR'], ['IS'], ['CONTAINS']]
_PREDEFINED_FUNCTIONS = [['size',], ['reverse',], ['head',], ['tail',],
['last',], ['extract',], ['filter',], ['reduce',], ['Type', 'type',],
['startNode',], ['endNode',], ['count',], ['collect',],
['sum',], ['percentileDisc',], ['stDev',], ['coalesce',], ['timestamp',],
['toInteger',], ['toFloat',], ['toBoolean',], ['keys',], ['properties',],
['length',], ['nodes',], ['relationships',], ['point',], ['distance',],
['abs',], ['rand',], ['ROUND', 'round',], ['CEIL', 'ceil',],
['Floor', 'floor',], ['sqrt',], ['sign',], ['sin',], ['cos',], ['tan',],
['cot',], ['asin',], ['acos',], ['atan',], ['atanZ',], ['haversin',],
['degrees',], ['radians',], ['pi',], ['log10',], ['log',], ['exp',],
['E', 'e'], ['toString',], ['replace',], ['substring',], ['left',],
['right',], ['trim',], ['ltrim',], ['toUpper',], ['toLower',],
['SPLIT', 'split',],['exists',], ['distinct', 'distinct', True],
['MAX', 'max']]
RELATIONSHIP_DIRECTIONS = {
'-': 'undirected',
'>': 'out',
'<': 'in',
}
def create_function(name, attrs=None, func_raw=False):
"""
This is a utility function that is used to dynamically create new
Func or FuncRaw objects.
Custom functions can be created and then used in Pypher:
create_function('MyCustomFunction', {'name': 'MY_CUSTOM_FUNCTION'})
p = Pypher()
p.MyCustomFunction('one', 2, 'C')
str(p) # MY_CUSTOM_FUNCTION($_PY_1123_1, $_PY_1123_2, $_PY_1123_3)
:param str name: the name of the Func object that will be created. This
value is used when the Pypher instance is converted to a string
:param dict attrs: any attributes that are passed into the Func constructor
options include `name` which will override the name param and will be
used when the Pypher instance is converted to a string
    :param func_raw bool: A flag stating if a FuncRaw instance should be created
instead of a Func instance
:return None
"""
attrs = attrs or {}
func = Func if not func_raw else FuncRaw
setattr(_MODULE, name, type(name, (func,), attrs))
def create_statement(name, attrs=None):
"""
This is a utility function that is used to dynamically create a new
Statement object.
    :param str name: the name of the Statement object that will be created. This
        value is used when the Pypher instance is converted to a string
    :param dict attrs: any attributes that are passed into the Statement constructor
options include `name` which will override the name param and will be
used when the Pypher instance is converted to a string
:return None
"""
attrs = attrs or {}
setattr(_MODULE, name, type(name, (Statement,), attrs))
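# Illustrative sketch (not part of the original module): a statement created this
# way renders its name followed by its arguments; plain (non-Pypher, non-Param)
# arguments are interpolated as-is rather than bound. The statement name below is
# hypothetical.
#
#   create_statement('MyClause', {'name': 'MY CLAUSE'})
#   p = Pypher()
#   p.MyClause('x')
#   str(p)  # -> 'MY CLAUSE x'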
class Param(object):
"""
This object handles setting a named parameter for use in Pypher instances.
Anytime Pypher.bind_param is called, this object is created.
:param str name: The name of the parameter that will be used in place
of a value in the resuliting Cypher string
:param value: The value of the parameter that is bound to the resulting
Cypher string
"""
def __init__(self, name, value):
self.name = name.lstrip('$')
self.value = value
self.placeholder = '$' + self.name
class Params(object):
"""
This object is used to collect Param objects that are bound to the Pypher
instance and all of its included instances. Anytime a Pypher instance is
added to an existing instance, its Params objects are merged so that the
parent instance handles all of the Params.
:param string prefix: an optional prefix value for any Param objects that
are created when the bind_param method is called without a defined
name
:param string key: a key that should be unique to each Params instance that
will be used when Param objects are created with the bind_param method
that do not have an existing name
"""
def __init__(self, prefix=None, key=None, pypher=None):
self.prefix = prefix + '_' if prefix else ''
self.key = key or str(uuid.uuid4())[-5:]
self.pypher = pypher
self._bound_params = {}
def reset(self):
"""
Method used to reset the Param objects that are currently registered
with the instance of the Params object.
:return: None
"""
self._bound_params = {}
def clone(self):
"""
Method used to create a copy of the current instance with all of the
Param objects copied in the _bound_params attribute
:return: a new instance with the same _bound_params values
:rtype: Params
"""
params = Params(prefix=self.prefix, key=self.key)
params._bound_params = copy.deepcopy(self._bound_params)
return params
@property
def bound_params(self):
return OrderedDict(sorted(self._bound_params.items()))
def bind_params(self, params=None):
if not params:
return self
if isinstance(params, dict):
for name, value in params.items():
self.bind_param(value, name)
else:
for value in params:
self.bind_param(value)
return self.bound_params
def bind_param(self, value, name=None):
bind = True
is_pypher = False
# we want None values to be assigned as the keyword NULL in the
# resulting Cypher
if value is None:
value = __.NULL
if isinstance(value, Param):
name = value.name
value = value.value
# we will skip binding the value if it is a Pypher instance
if isinstance(value, Pypher):
value.parent = self.pypher.parent
value = str(value)
bind = False
is_pypher = True
if bind and value in self._bound_params.values():
for k, v in self._bound_params.items():
if v == value and type(v) == type(value):
name = k
break
elif bind and value in self._bound_params.keys():
for k, v in self._bound_params.items():
if k == value:
name = k
value = v
break
if not name:
name = self.param_name()
param = Param(name=name, value=value)
self._bound_params[param.name] = param.value
# if the value was a Pypher instance, we want to override the
# .placeholder property with the resulting Cypher string and not
# a variable
if is_pypher:
param.placeholder = value
return param
def param_name(self, name=None):
return '{}{}_{}'.format(name or self.prefix, self.key,
len(self.bound_params))
def __iadd__(self, other):
self.bind_params(other.bound_params)
return self
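# Illustrative sketch (not part of the original module) of Param and Params working
# together; the key suffix is generated per Params instance, so exact names vary:
#
#   params = Params(prefix='NEO')
#   bound = params.bind_param(42)
#   bound.placeholder     # -> '$NEO_<key>_0'
#   params.bound_params   # -> OrderedDict([('NEO_<key>_0', 42)])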
class _Link(type):
def __new__(cls, name, bases, attrs):
cls = super(_Link, cls).__new__(cls, name, bases, attrs)
aliases = attrs.get('_ALIASES', None)
_LINKS[name.lower()] = cls
if aliases:
for alias in aliases:
alias_low = alias.lower()
if CHECK_CUSTOM_CLASHES:
if alias in _LINKS:
error = ('The alias: "{}" defined in "{}" is already'
' used by "{}"'.format(alias, name, _LINKS[alias]))
raise PypherAliasException(error)
elif alias_low in _LINKS:
error = ('The alias: "{}" defined in "{}" is already'
' used by "{}"'.format(alias, name,
_LINKS[alias_low]))
raise PypherAliasException(error)
_LINKS[alias] = cls
_LINKS[alias_low] = cls
return cls
class Pypher(with_metaclass(_Link)):
PARAM_PREFIX = '$NEO'
def __init__(self, parent=None, params=None, *args, **kwargs):
self._ = self
self._parent = parent
self.next = None
self.params = params or Params(prefix=self.PARAM_PREFIX)
def reset(self):
self.next = None
self.params = Params(prefix=self.PARAM_PREFIX)
def _get_parent(self):
return self._parent
def _set_parent(self, parent):
if not parent:
return self
self._parent = parent
parent.params += self.params
self.params = parent.params
return self
parent = property(_get_parent, _set_parent)
@property
def bound_params(self):
return self.params.bound_params
def safely_stringify_for_pudb(self):
return None
def bind_params(self, params=None):
self.params.pypher = self
return self.params.bind_params(params=params)
def bind_param(self, value, name=None):
self.params.pypher = self
return self.params.bind_param(value=value, name=name)
def __getattr__(self, attr):
attr_low = attr.lower()
if attr_low[:2] == '__' and attr_low[-2:] == '__':
link = Property(name=attr.strip('__'))
elif attr_low in _LINKS:
link = _LINKS[attr_low]()
else:
link = Statement(name=attr)
return self.add_link(link)
def __call__(self, *args, **kwargs):
if ('name' not in kwargs and '_name' in self._bottom.__dict__
and type(self._bottom) is Statement):
kwargs['name'] = self._bottom.name
func = self._bottom.__class__(*args, **kwargs)
return self.remove_link(self._bottom).add_link(func)
def __getitem__(self, *args):
comp = List(parent=self, *args)
return self.add_link(comp)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
token = self.next
prev = token
tokens = []
while token:
token.parent = self
pre = ''
suff = ''
if token._CLEAR_PRECEEDING_WS:
try:
tokens[-1] = tokens[-1].rstrip()
except Exception as e:
pass
if token._ADD_PRECEEDING_WS:
try:
skip = tokens[-1][-1] == ' '
except Exception as e:
skip = False
if not skip:
pre = ' '
if token._ADD_SUCEEDING_WS:
suff = ' '
part = '{}{}{}'.format(pre, str(token), suff)
tokens.append(part)
prev = token
token = token.next
return ''.join(tokens).strip()
def __add__(self, other):
return self.operator(operator='+', value=other)
def __radd__(self, other):
return self.operator(operator='+', value=other, inverse=True)
def __iadd__(self, other):
return self.operator(operator='+=', value=other)
def __sub__(self, other):
return self.operator(operator='-', value=other)
def __rsub__(self, other):
return self.operator(operator='-', value=other, inverse=True)
def __isub__(self, other):
return self.operator(operator='-=', value=other)
def __mul__(self, other):
return self.operator(operator='*', value=other)
def __rmul__(self, other):
return self.operator(operator='*', value=other, inverse=True)
def __imul__(self, other):
return self.operator(operator='*=', value=other)
def __div__(self, other):
return self.operator(operator='/', value=other)
def __rdiv__(self, other):
return self.operator(operator='/', value=other, inverse=True)
def __idiv__(self, other):
return self.operator(operator='/=', value=other)
def __truediv__(self, other):
return self.__div__(other=other)
def __rtruediv__(self, other):
return self.__rdiv__(other=other)
def __itruediv__(self, other):
return self.__idiv__(other=other)
def __mod__(self, other):
return self.operator(operator='%', value=other)
def __rmod__(self, other):
return self.operator(operator='%', value=other, inverse=True)
def __imod__(self, other):
return self.operator(operator='%=', value=other)
def __and__(self, other):
return self.operator(operator='&', value=other)
def __rand__(self, other):
return self.operator(operator='&', value=other, inverse=True)
def __or__(self, other):
return self.operator(operator='|', value=other)
def __ror__(self, other):
return self.operator(operator='|', value=other, inverse=True)
def __xor__(self, other):
return self.operator(operator='^', value=other)
def __rxor__(self, other):
return self.operator(operator='^', value=other, inverse=True)
def __ixor__(self, other):
return self.operator(operator='^=', value=other)
def __gt__(self, other):
return self.operator(operator='>', value=other)
def __ge__(self, other):
return self.operator(operator='>=', value=other)
def __lt__(self, other):
return self.operator(operator='<', value=other)
def __le__(self, other):
return self.operator(operator='<=', value=other)
def __ne__(self, other):
return self.operator(operator='<>', value=other)
def __eq__(self, other):
return self.operator(operator='=', value=other)
def operator(self, operator, value, inverse=False):
op = Operator(operator=operator, value=value, inverse=inverse)
return self.add_link(op, before_self=inverse)
def property(self, name):
prop = Property(name=name)
return self.add_link(prop)
def raw(self, *args):
raw = Raw(*args)
return self.add_link(raw)
def rel_out(self, *args, **kwargs):
kwargs['direction'] = 'out'
rel = Relationship(*args, **kwargs)
return self.add_link(rel)
def rel_in(self, *args, **kwargs):
kwargs['direction'] = 'in'
rel = Relationship(*args, **kwargs)
return self.add_link(rel)
def func(self, name, *args, **kwargs):
kwargs['name'] = name
func = Func(*args, **kwargs)
return self.add_link(func)
def func_raw(self, name, *args, **kwargs):
kwargs['name'] = name
func = FuncRaw(*args, **kwargs)
return self.add_link(func)
def link(self, name):
statement = Statement(name=name)
return self.add_link(statement)
def apply_partial(self, partial):
partial.pypher = self
partial.build()
return self
def add_link(self, link, before_self=False):
if before_self:
link.parent = self.parent or self
link.next = self.next
self.next = link
return self
link.parent = self
token = self.next
if not token:
self.next = link
self._bottom = link
return self
while token:
try:
token.next.next
token = token.next
continue
except Exception as e:
token.next = link
self._bottom = link
break
return self
def remove_link(self, remove):
link = self.next
if not link:
return self
elif id(link) == id(remove):
self.next = None
self._bottom = None
return self
while link:
if id(link.next) == id(remove):
link.next = link.next.next
break
link = link.next
return self
def append(self, pypher):
token = self.next
if not token:
self.next = pypher.next
self._bottom = pypher.next
while token:
try:
token.next.next
token = token.next
except Exception as e:
token.next = pypher.next
self._bottom = pypher.next
break
return self
def clone(self, pypher=None):
pypher = Pypher()
link = self.next
nxt = pypher
while link:
try:
clone = link.__class__()
clone.__dict__ = copy.copy(link.__dict__)
clone.__dict__['next'] = None
link = link.next
nxt.next = clone
nxt._bottom = clone
nxt = clone
except Exception as e:
break
return pypher
class _BaseLink(Pypher):
_CLEAR_PRECEEDING_WS = False
_ADD_PRECEEDING_WS = False
_ADD_SUCEEDING_WS = True
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
super(_BaseLink, self).__init__()
def __unicode__(self):
return self.__class__.__name__.upper()
class Statement(_BaseLink):
_ADD_PRECEEDING_WS = True
_ADD_SUCEEDING_WS = True
_CAPITALIZE = True
def __init__(self, *args, **kwargs):
try:
self._name = kwargs.pop('name')
except:
self._name = None
super(Statement, self).__init__(*args, **kwargs)
@property
def name(self):
if self._name:
return self._name
if self._CAPITALIZE:
return self.__class__.__name__.upper()
return self.__class__.__name__
def __unicode__(self):
if self.args:
parts = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
elif isinstance(arg, Param):
self.bind_param(arg)
arg = arg.placeholder
parts.append(str(arg))
parts = ', '.join(parts)
return '{} {}'.format(self.name, parts)
return self.name
class Property(Statement):
_ADD_PRECEEDING_WS = False
_CLEAR_PRECEEDING_WS = True
_ALIASES = ['prop',]
def __init__(self, name=None):
super(Property, self).__init__(name=name)
def __unicode__(self):
return '.`{}`'.format(self.name)
class Label(Statement):
_ADD_PRECEEDING_WS = False
_CLEAR_PRECEEDING_WS = True
_ALLOWED_OPERATORS = {
'+': ':',
'|': '|',
}
def __init__(self, labels=None, default_operator='+'):
self._labels = []
self.labels = labels
self._operator = '+'
self.operator = default_operator
super(Label, self).__init__()
def _set_label(self, labels):
if not labels:
labels = []
elif not isinstance(labels, (list, set, tuple)):
labels = [labels]
self._labels = labels
def _get_label(self):
return self._labels
labels = property(_get_label, _set_label)
def _get_operator(self):
return self._ALLOWED_OPERATORS[self._operator]
def _set_operator(self, operator):
        if operator not in self._ALLOWED_OPERATORS:
            error = 'The label operator: {} is not allowed'.format(operator)
            raise PypherArgumentException(error)
self._operator = operator
operator = property(_get_operator, _set_operator)
def __unicode__(self):
if not self.labels:
return ''
labels = ['`{}`'.format(a) for a in self.labels]
labels = ('{}'.format(self.operator)).join(labels)
return ':{labels}'.format(labels=labels)
class IN(Statement):
def __unicode__(self):
args = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
value = str(arg)
else:
param = self.bind_param(arg)
value = param.placeholder
args.append(value)
args = ', '.join(args)
return 'IN [{args}]'.format(args=args)
class Func(Statement):
_CAPITALIZE = False
def get_args(self):
args = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
value = str(arg)
else:
param = self.bind_param(arg)
value = param.placeholder
args.append(value)
return ', '.join(args)
def __unicode__(self):
args = self.get_args()
return '{function}({args})'.format(function=self.name,
args=args)
class FuncRaw(Func):
def get_args(self):
args = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
args.append(str(arg))
return ', '.join(args)
class ID(FuncRaw):
name = 'id'
class Raw(Statement):
def __unicode__(self):
args = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
args.append(str(arg))
args = ' '.join(args)
return '{args}'.format(args=args)
class Conditional(Func):
_ADD_PRECEEDING_WS = True
_ADD_SUCEEDING_WS = True
_SEPARATOR = ', '
def __unicode__(self):
parts = []
for arg in self.args:
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
value = str(arg)
else:
param = self.bind_param(arg)
value = param.placeholder
parts.append(value)
parts = self._SEPARATOR.join(parts)
return '({})'.format(parts)
class ConditionalAND(Conditional):
_SEPARATOR = ' AND '
_ALIASES = ['CAND', 'COND_AND']
class ConditionalOR(Conditional):
_SEPARATOR = ' OR '
_ALIASES = ['COR', 'COND_OR']
class _APOCBitwiseBase(Func):
def __unicode__(self):
def fix(arg):
if isinstance(arg, (Pypher, Partial)):
arg.parent = self.parent
value = str(arg)
else:
param = self.bind_param(arg)
value = param.placeholder
return value
if not isinstance(self.args, list):
self.args = list(self.args)
left = fix(self.args.pop(0))
if len(self.args) > 1:
bw = self.__class__(*self.args)
bw.parent = self.parent
right = str(bw)
else:
right = fix(self.args[0])
return 'apoc.bitwise.op({}, "{}", {})'.format(left, self._OPERATOR,
right)
class BitwiseAnd(_APOCBitwiseBase):
_ALIASES = ['BAND',]
_OPERATOR = '&'
class BitwiseOr(_APOCBitwiseBase):
_ALIASES = ['BOR',]
_OPERATOR = '|'
class BitwiseXOr(_APOCBitwiseBase):
_ALIASES = ['BXOR',]
_OPERATOR = '^'
class BitwiseNot(_APOCBitwiseBase):
_ALIASES = ['BNOT',]
_OPERATOR = '~'
class BitwiseLeftShift(_APOCBitwiseBase):
_ALIASES = ['BLSHIFT',]
_OPERATOR = '>>'
class BitwiseRightShift(_APOCBitwiseBase):
_ALIASES = ['BRSHIFT',]
_OPERATOR = '<<'
class BitwiseUnsighedLeftShift(_APOCBitwiseBase):
_ALIASES = ['BULSHIFT',]
_OPERATOR = '>>>'
class List(_BaseLink):
_ADD_PRECEEDING_WS = False
_CLEAR_PRECEEDING_WS = True
def __unicode__(self):
args = []
for arg in self.args:
if isinstance(arg, Pypher):
value = str(arg)
arg.parent = self.parent
elif isinstance(arg, Partial):
value = str(arg)
else:
param = self.bind_param(arg)
value = param.placeholder
args.append(value)
args = ', '.join(args)
return '[{args}]'.format(args=args)
class Comprehension(List):
_ADD_PRECEEDING_WS = True
_CLEAR_PRECEEDING_WS = False
_ALIASES = ['comp']
class Map(_BaseLink):
_ADD_PRECEEDING_WS = True
def __unicode__(self):
body = []
def prep_value(value, name=None):
if isinstance(value, (list, set, tuple)):
values = []
for v in value:
if isinstance(v, (Pypher, Partial)):
v.parent = self.parent
values.append(str(v))
else:
param = self.bind_param(v)
values.append(param.placeholder)
values = ', '.join(values)
return '[{}]'.format(values)
if isinstance(value, (Pypher, Partial)):
value.parent = self.parent
elif name:
name = self.params.param_name(name)
param = self.bind_param(value=value, name=name)
return param.placeholder
return str(value)
for arg in self.args:
body.append(prep_value(arg))
kwargs = OrderedDict(sorted(self.kwargs.items()))
for k, val in kwargs.items():
pair = '`{}`: {}'.format(k, prep_value(val, k))
body.append(pair)
body = ', '.join(body)
return '{{{}}}'.format(body)
class MapProjection(Map):
_ALIASES = ['map_projection', 'projection',]
def __init__(self, _name=None, *args, **kwargs):
super(MapProjection, self).__init__(*args, **kwargs)
self.name = _name
def __unicode__(self):
_map = super(MapProjection, self).__unicode__()
return '{} {}'.format(self.name, _map)
class Operator(_BaseLink):
_ADD_PRECEEDING_WS = True
_ADD_SUCEEDING_WS = False
_BIND_PARAMS = True
def __init__(self, value=None, operator=None, inverse=False):
self.operator = operator or self.operator
self.value = value
self.inverse = inverse
super(Operator, self).__init__()
def _get_value(self):
return self._value
def _set_value(self, value):
# None should result to NULL
if value is None:
value = __.NULL
self._value = value
return self
value = property(_get_value, _set_value)
def __unicode__(self):
operator = self.operator
if isinstance(self.value, (Pypher, Partial)):
self.value.parent = self.parent
value = str(self.value)
elif isinstance(self.value, dict):
            # TODO: abstract this logic out to be used for entity params
def params(item):
new = []
is_dict = isinstance(item, dict)
if is_dict:
item = OrderedDict(sorted(item.items()))
for k, v in item.items():
if isinstance(v, (list, set, tuple, dict)):
v = params(v)
elif self._BIND_PARAMS:
param = self.bind_param(v)
v = param.placeholder
new.append('`{}`: {}'.format(k, v))
return '{{{}}}'.format(', '.join(new))
else:
for v in item:
is_seq = isinstance(v, (list, set, tuple, dict))
if is_seq:
v = params(v)
elif self._BIND_PARAMS:
param = self.bind_param(v)
v = param.placeholder
new.append(v)
return '[{}]'.format(', '.join(new))
return new
value = params(self.value)
elif self._BIND_PARAMS:
param = self.bind_param(self.value)
value = param.placeholder
else:
value = self.value
if self.inverse:
operator, value = value, operator
return '{} {}'.format(operator, value)
class OperatorRaw(Operator):
_BIND_PARAMS = False
class AND(Operator):
operator = 'AND'
class OR(Operator):
operator = 'OR'
class Assign(Operator):
operator = '='
class Alias(OperatorRaw):
_ALIASES = ['AS',]
operator = 'AS'
class Rexp(Operator):
_ALIASES = ['re',]
operator = '=~'
class Entity(_BaseLink):
_ADD_PRECEEDING_WS = False
_ADD_SUCEEDING_WS = False
_CLEAR_PRECEEDING_WS = False
_LABEL_OPERATOR = '+'
def __init__(self, variable=None, labels=None, **properties):
if not isinstance(labels, Label):
labels = Label(labels)
labels.operator = self._LABEL_OPERATOR
self.variable = variable or ''
self._labels = labels
self._properties = OrderedDict(sorted(properties.items()))
super(Entity, self).__init__()
@property
def labels(self):
variable = self.variable
labels = str(self._labels)
if labels:
if variable:
return '{variable}{labels}'.format(variable=variable,
labels=labels)
else:
return '{labels}'.format(labels=labels)
return variable
@property
def properties(self):
properties = []
for k, v in self._properties.items():
name = self.params.param_name(k)
param = self.bind_param(value=v, name=name)
properties.append('`{key}`: {val}'.format(key=k,
val=param.placeholder))
if properties:
return '{{{props}}}'.format(props=', '.join(properties))
return ''
class Node(Entity):
_ALIASES = ['n_',]
def __unicode__(self):
properties = self.properties
if properties:
properties = ' ' + properties
return '({labels}{properties})'.format(labels=self.labels,
properties=properties)
class Relationship(Entity):
_ALIASES = ['rel', 'r_']
_DEFAULT_DIRECTION = 'undirected'
_DIRECTIONS = {
'undirected': '-{}-',
'in': '<-{}-',
'out': '-{}->',
}
_LABEL_OPERATOR = '|'
def __init__(self, variable=None, labels=None, types=None, direction=None,
hops=None, min_hops=None, max_hops=None, **properties):
labels = types or labels
super(Relationship, self).__init__(variable=variable, labels=labels,
**properties)
self._direction = None
self.direction = direction
if hops is None:
if min_hops is None and max_hops is None:
# hops are not specified
self.hops = []
else:
# at least one of hops is not None
# empty string gets interpreted as open bound
# e.g. ["", 3] -> "..3" and [3, ""] -> "3.."
min_hops = "" if min_hops is None else str(min_hops)
max_hops = "" if max_hops is None else str(max_hops)
if min_hops == max_hops:
                    # deprecated behaviour: equal bounds collapse to a single hop count; use hops instead
self.hops = [min_hops]
else:
self.hops = [min_hops, max_hops]
else:
if min_hops is not None or max_hops is not None:
raise ValueError("If 'hops' is specified, do not specify 'min_hops' or 'max_hops'")
self.hops = [str(hops)]
@property
def variable_length(self):
if self.hops:
return '*{}'.format('..'.join(self.hops))
return ''
def _get_direction(self):
direction = self._direction.lower()
return self._DIRECTIONS[direction]
def _set_direction(self, direction=None):
if not direction:
direction = self._DEFAULT_DIRECTION
elif direction in RELATIONSHIP_DIRECTIONS:
direction = RELATIONSHIP_DIRECTIONS[direction]
elif direction in RELATIONSHIP_DIRECTIONS.values():
direction = direction
else:
error = 'The direction: {} is not valid'.format(direction)
raise PypherArgumentException(error)
self._direction = direction
direction = property(_get_direction, _set_direction)
def __unicode__(self):
properties = self.properties
labels = self.labels
hops = self.variable_length
if properties:
properties = ' ' + properties
if labels or properties or hops:
fill = '[{labels}{properties}{hops}]'.format(labels=labels,
properties=properties, hops=hops)
else:
fill = ''
return self.direction.format(fill)
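# Illustrative rendering sketch (not part of the original module), based on the
# hops handling above:
#
#   str(Relationship(labels='KNOWS', direction='out', hops=2))
#       # -> '-[:`KNOWS`*2]->'
#   str(Relationship(labels='KNOWS', direction='out', min_hops=1, max_hops=3))
#       # -> '-[:`KNOWS`*1..3]->'
#   str(Relationship(direction='in'))
#       # -> '<--'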
class Anon(object):
def __init__(self):
pass
def __getattr__(self, attr):
py = Pypher()
getattr(py, attr)
return py
def __call__(self, *args, **kwargs):
return Pypher()
# Create an anonymous Pypher factory
__ = Anon()
# dynamically create all pre defined Statments and functions
for state in _PREDEFINED_STATEMENTS:
name = state[0]
try:
attrs = {'name': state[1]}
except Exception as e:
attrs = {}
create_statement(name=name, attrs=attrs)
for fun in _PREDEFINED_FUNCTIONS:
name = fun[0]
try:
attrs = {'name': fun[1]}
except Exception as e:
attrs = {'name': name}
try:
func_raw = bool(fun[2])
except Exception as e:
func_raw = False
create_function(name=name, attrs=attrs, func_raw=func_raw)
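# Minimal usage sketch (illustrative addition, not part of the original module).
# Placeholder names include a per-instance key, so exact parameter names vary:
#
#   p = Pypher()
#   p.MATCH.node('n', labels='Person', name='Alice').RETURN.n
#   str(p)           # -> roughly: MATCH (n:`Person` {`name`: $name<key>_0}) RETURN n
#   p.bound_params   # -> OrderedDict with the bound value 'Alice'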
|
the-stack_106_13869
|
# coding:utf-8
# datetime: 2020/5/15 7:27 PM
# software: PyCharm
# File: app_start
LICENSE = 'Copyright 2019.'
import time
import subprocess
class Phone:
def __init__(self, d=None, u2url=None,brand=None, logger=None):
self.exlude_app = ["com.tencent.mm",'com.github.uiautomator', 'com.github.uiautomator.test']
self.d = d
if not brand:
self.brand = self.d.device_info['brand']
else:
self.brand = brand
self.logger = logger
self.attention = {
"com.android.packageinstaller": [u"确定", u"安装", u"下一步", u"好", u"允许", u"我知道"],
"com.miui.securitycenter": [u"继续安装"], # xiaomi
"com.lbe.security.miui": [u"允许"], # xiaomi
"android": [u"好", u"安装"], # vivo
"com.huawei.systemmanager": [u"立即删除"], # huawei
"com.android.systemui": [u"同意"], # 锤子
}
self.attention_install = {
"letv" : {
'adb_install_allow' :{'times': 5,
'when': [ "//*[@resource-id='android:id/le_bottomsheet_btn_confirm_5']", "不再提示" ],
'call': (('text','不再提示'), ('text','允许') )}
},
"xiaomi": {
'adb_install_allow': {'times': 5,
'when': [],
'call': None }
},
"vivo": {
'adb_IMEI_allow': {'times': 5,
'when': ["//*[@resource-id='android:id/alertTitle']", "知道了"],
'call': ( ('text', '知道了'), ('text', '好') )},
'adb_nomarket_allow': {'times': 5,
'when': ["安全警告","好"],
'call': (('text', '好'))},
'adb_install_allow': {'times': 5,
'when': ["//*[@resource-id='vivo:id/vivo_adb_install_ok_button']","安装"],
'call': (('text', '安装'), ('resourceId', 'vivo:id/vivo_adb_install_ok_button'))}
},
"meizu": {
'adb_install_allow': {'times': 5,
'when': [],
'call': None }
},
"oppo": {
'adb_nomarket_allow': {'times': 5,
'when': ["安装风险", "允许"],
'call': ( ('text', '允许'), ('resourceId', 'android:id/button2') )},
'adb_install_allow': {'times': 5,
'when': ["//*[@resource-id='com.android.packageinstaller:id/permission_list']",
"安装"],
'call': ( ('text', '安装'), ('resourceId', 'com.android.packageinstaller:id/bottom_button_two') )}
},
}
self.attention_install['LeEco'.lower()] = self.attention_install['letv']
self.attention_start = {
"letv": {
'allow_out_app': {'times': 5,
'when': ["//*[@resource-id='android:id/le_bottomsheet_btn_confirm_5']", "允许"],
'call': (('text', '允许'))},
'allow_in_app': {'times': 3,
'when': ["//*[@resource-id='android:id/le_bottomsheet_btn_confirm_5']", "允许"],
'call': ( ('text', '不再提示'), ('text', '允许') )}
},
"xiaomi": {
'allow_in_app': {'times': 5,
'when': [ "//*[@resource-id='com.lbe.security.miui:id/perm_desc_root']", "允许" ],
'call': ( ('text', '不再提示'), ('text', '允许') )}
},
"vivo": {
'allow_in_app': {'times': 5,
'when': ["权限请求", "允许"],
'call': ( ('text', '允许'), ('resourceId', 'android:id/button1') )},
},
"meizu": {
'allow_in_app': {'times': 5,
'when': ["//*[@resource-id='android:id/title_template']", "允许" ],
'call': ( ('text', '允许'), ('resourceId', 'android:id/button1') )}
},
"oppo": {
'allow_in_app': {'times': 5,
'when': ["//*[@resource-id='android:id/title_template']", "允许"],
'call': ( ('text', '允许'), ('resourceId', 'android:id/button1') )},
},
}
self.attention_start[ 'LeEco'.lower() ] = self.attention_start['letv']
def permission_call_func(self, *args, **kwargs ):
for item in args:
if self.d(**{item[0]:item[1]}).exists:
self.d(**{item[0]:item[1]}).click()
return True
def register_watcher(self, attention=None):
if not attention:
return False
for key,value in attention.items():
watcher = self.d.watcher(key, value['times'])
if not value['when']:
continue
for wh in value['when']:
watcher = watcher.when(wh)
if not value['call']:
continue
watcher.call( self.permission_call_func, *value['call'] )
return True
def permission_install(self):
device_brand = self.brand
        if device_brand.lower() in self.attention_install:
            attention = self.attention_install[device_brand.lower()]
            self.register_watcher(attention=attention)
            self.d.watcher.start()
            return True
for key,value in self.attention_install.items():
self.register_watcher(attention=value)
self.d.watcher.start()
return True
def permission_start(self):
device_brand = self.brand
        if device_brand.lower() in self.attention_start:
            attention = self.attention_start[device_brand.lower()]
            self.register_watcher(attention=attention)
            self.d.watcher.start()
            return True
for key,value in self.attention_start.items():
self.register_watcher(attention=value)
self.d.watcher.start()
return True
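    # Illustrative usage sketch (not part of the original module); "d" would be a
    # uiautomator2 device handle, e.g. d = uiautomator2.connect():
    #
    #   phone = Phone(d=d)
    #   phone.permission_install()   # auto-dismiss install dialogs via watchers
    #   # ... install the apk over adb ...
    #   phone.permission_start()     # auto-grant runtime-permission dialogs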
def permission_message_install(self):
        # LeEco asks whether adb installs are allowed
        # d(resourceId="android:id/le_bottomsheet_btn_chk_ctn") is the "不再提示" (don't remind again) checkbox
if self.brand == 'letv' or self.brand == 'leeco':
self.d.watcher('adb_install_allow', 5).when(
"//*[@resource-id='android:id/le_bottomsheet_btn_confirm_5']").when("不再提示").call(
self.permission_call_func, ('text','不再提示'), ('text','允许'))
# self.d.watcher('adb_install_allow', 5).when("不再提示").call(
# self.permission_call_func, ('text', '不再提示'), ('text', '允许'))
        elif self.brand == 'xiaomi':  # Xiaomi: no prompt
            pass
        elif self.brand == 'meizu':  # Meizu: no security warning prompt
pass
elif self.brand == 'vivo':
            # vivo shows a security warning
self.d.watcher('adb_IMEI_allow', 5).when(
"//*[@resource-id='android:id/alertTitle']").when("知道了").call(
                self.permission_call_func, ('text', '知道了'), ('text', '好'))  # guards against vivo's invalid IMEI/MEID prompt
self.d.watcher('adb_nomarket_allow', 5).when("//android.widget.TextView[@text='安全警告']").call(
self.permission_call_func, ('text', '好'))
self.d.watcher('adb_install_allow', 5).when(
"//*[@resource-id='vivo:id/vivo_adb_install_ok_button']").when("安装").call(
self.permission_call_func, ('text', '安装'), ('resourceId', 'vivo:id/vivo_adb_install_ok_button'))
elif self.brand == 'oppo':
            # oppo warns about non-market apps; the watcher below clicks the install button
self.d.watcher('adb_nomarket_allow', 5).when("安装风险").when("允许").call(
self.permission_call_func, ('text', '允许'), ('resourceId', 'android:id/button2'))
self.d.watcher('adb_install_allow', 5).when(
"//*[@resource-id='com.android.packageinstaller:id/permission_list']").when("安装").call(
self.permission_call_func, ('text', '安装'), ('resourceId', 'com.android.packageinstaller:id/bottom_button_two'))
else:
return False
self.d.watcher.start()
return True
def permission_message_start(self):
# LeEco pops up a confirmation when the page is opened
# d(resourceId="android:id/le_bottomsheet_btn_chk_ctn") is the "不再提示" (do not remind again) checkbox
if self.brand == 'letv' or self.brand == 'leeco':
self.d.watcher('allowout', 5).when(
"//*[@resource-id='android:id/le_bottomsheet_btn_confirm_5']").when("允许").call(
self.permission_call_func, ('text', '允许'))
# LeEco also pops up an in-app confirmation
# self.d(resourceId="com.android.packageinstaller:id/permission_message").exists  # text asks whether to allow the app to use this device's location
# self.d(resourceId="com.android.packageinstaller:id/permission_allow_button").click()  # text=允许 (Allow)
# self.d(resourceId="com.android.packageinstaller:id/permission_deny_button").click()  # text=拒绝 (Deny)
self.d.watcher('allowin', 3).when(
"//*[@resource-id='com.android.packageinstaller:id/permission_message']").when("允许").call(
self.permission_call_func, ('text', '不再提示'), ('text', '允许'))
elif self.brand == 'xiaomi':
# Xiaomi permission dialogs: photos/media files, satellite/network location, IMEI-IMSI/phone-number permissions
#d(resourceId="com.lbe.security.miui:id/desc_container")
#d(resourceId="com.lbe.security.miui:id/perm_desc_root")
#d(resourceId="com.lbe.security.miui:id/permission_list")
self.d.watcher('allowin_lbe', 5).when(
"//*[@resource-id='com.lbe.security.miui:id/perm_desc_root']").when("允许").call(
self.permission_call_func, ('text', '不再提示'), ('text', '允许'))
self.d.watcher('allowin_android', 5).when(
"//*[@resource-id='android:id/button1']").when("允许").call(
self.permission_call_func, ('text', '不再提示'), ('text', '允许'))
elif self.brand == 'vivo':
# vivo permission dialogs: phone state, camera, microphone, location, contacts, sdcard
# d(resourceId="vivo:id/rememberCB") is the "不再提示" (do not remind again) checkbox
# d(resourceId="vivo:id/confirm_msg")
# d(resourceId="android:id/title_template")
# d(resourceId="vivo:id/hint_msg") d(resourceId="android:id/button1") is the "允许" (Allow) button
self.d.watcher('allowin', 5).when("权限请求").when("允许").call(
self.permission_call_func, ('text', '允许'), ('resourceId', 'android:id/button1'))
# the vivo "i管家" (iManager) dialog also has a Cancel button here
elif self.brand == 'meizu':
# Meizu permission dialog
# d(resourceId="android:id/title_template") d(resourceId="android:id/topPanel")
# d(resourceId="android:id/button1") is the "允许" (Allow) button, d(resourceId="android:id/buttonPanel")
self.d.watcher('allowin', 5).when(
"//*[@resource-id='android:id/title_template']").when("允许").call(
self.permission_call_func, ('text', '允许'), ('resourceId', 'android:id/button1'))
elif self.brand == 'oppo':
# OPPO permission dialog
self.d.watcher('allowin', 5).when(
"//*[@resource-id='android:id/title_template']").when("允许").call(
self.permission_call_func, ('text', '允许'), ('resourceId', 'android:id/button1'))
else:
return False
self.d.watcher.start()
return True
# TODO: apk update, apk install, apk permissions
# OPPO and vivo devices need extra handling: both report that the install has finished right after it starts, so a wait is needed afterwards
def detect_start_app(self, app_info=None, app=None, start=False, location=False):  # request update info and install
'''
:param app_info: app_info = {
"app_alias_name": "ju_news",
"app_package_name": "com.xiangzi.jukandian",
}
:param app:{
'appalias': 'wechat',
'chinese': '微信',
'packagename': 'com.tencent.mm',
'host_url': 'http://dldir1.qq.com/weixin/android/weixin705android1440.apk',
'mi_url': 'https://b6.market.xiaomi.com/download/AppStore/087c84ba1fb4c3299bfc2b1cd9d5bf0315943dff0/com.tencent.mm.apk',
'tencent_url': 'https://d4975263df62a5727ed5f2e8637b3c74.dd.cdntips.com/imtt.dd.qq.com/16891/apk/B1E9D2F728BAD42741673019E6FC8986.apk',
'pan_url': None,
'version': '7.0.10'
}
:param start:
:param location:
:return:
'''
# step 1: check for update info
if not app_info:
return False
if not app:
app = self.appinfo(appinfo=app_info)
if not app:
self.logger.error("appalias %s info is %s" % (app_info["app_alias_name"], app))
return False
self.logger.debug("appalias %s info is %s" % (app_info["app_alias_name"], app))
if app:  # if the request returned nothing, skip this block and just try to launch the app directly
install = False
try:
info = self.d.app_info(app_info["app_package_name"])
if info["versionName"] < app["version"]:
install = True
except:
install = True
if install:
# self.d.press('home')  # return to the home screen, otherwise parsing of the permission buttons may be disturbed
# TODO watcher start
urls = [app["tencent_url"], app["mi_url"],
app["host_url"], app["pan_url"]]
watcher_status = self.permission_message_install()
for url in urls:
try:
self.d.app_install(url)
break
except:
pass
if watcher_status:
self.d.watcher.reset()
self.logger.debug("appalias %s install completed." % (app_info["app_alias_name"]))
if start:
# step 2: start the app; be especially careful with OPPO and vivo here
watcher_status = self.permission_message_start()
try:
if self.d.app_current()["package"] == app_info["app_package_name"]:
self.exlude_app.append(app_info["app_package_name"])
self.stop_all_app()
self.d.app_start(app_info["app_package_name"])  # prevent jumping to another app
else:
self.d.press("home")
self.stop_all_app()
self.start_app(app_info,location=location)
except:
self.logger.error("Failed to start %s ..." % self.d.app_info(app_info["app_package_name"])['label'])
# time.sleep(5)  # avoid the app opening before the permission confirm button can be clicked
self.logger.info("%s is running, started successfully..." % self.d.app_info(app_info["app_package_name"])['label'])
if watcher_status:
self.d.watcher.reset()
return app
def subprocess(self,cmd=None):
if not cmd:
return None
try:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
out = p.stdout.read()
result = out.decode().strip()
return result
except:
return None
def pmlist_package(self,string=None):
if not string:
return []
result = []
for line in string.strip().split('\n'):
if ':' not in line:
continue
nline = line.strip().split(':')
result.append(nline[1].strip())
return result
def list_packages(self):
try:
result = self.subprocess(''' pm list packages -3 ''')
result = self.pmlist_package(string=result)
return result
except:
return []
def stop_all_app(self):
kill_pkgs = set(self.d.app_list_running()).difference(self.exlude_app)
kill_pkgs = list(set(self.list_packages()).intersection(set(kill_pkgs)))
if not kill_pkgs:
return True
for pkg in kill_pkgs:
self.d.app_stop(pkg)
return True
def close_all_app(self):
try:
self.d.press('recent')
if self.d(resourceId="com.android.systemui:id/clearAnimView").exists:  # Xiaomi
self.d(resourceId="com.android.systemui:id/clearAnimView").click()
return True
if self.d(resourceId="com.android.systemui:id/leui_recent_clear_all_txtview").exists:  # LeEco (Letv)
self.d(resourceId="com.android.systemui:id/leui_recent_clear_all_txtview").click()
return True
if self.d(resourceId="com.android.systemui:id/clear_all_icon").exists: # meizu
self.d(resourceId="com.android.systemui:id/clear_all_icon").click()
return True
if self.d(resourceId="com.coloros.recents:id/clear_button").exists:
self.d(resourceId="com.coloros.recents:id/clear_button").click()
return True
if self.d(text="一键加速").exists:  # vivo devices ("一键加速" = one-tap boost)
self.d(text="一键加速").click()
# time.sleep(1)
# self.d.press('back')
# Swipe(self.d).swipeUp_from_bottom()
return True
if self.d(resourceId="com.coloros.recents:id/progress_bar").exists:
self.d(resourceId="com.coloros.recents:id/progress_bar").click()
return True
if self.d(resourceId="com.android.systemui:id/leui_recent_clear_all_btn").exists:  # LeEco; this may be buggy and fail to click
self.d(resourceId="com.android.systemui:id/leui_recent_clear_all_btn").click()
return True
self.d.press('back')
return False
except:
return False
def start_app(self,app_info, location=False):
if not self.d.device_info:
self.logger.info("Device connection is closed! Reconnect and try again.")
return False
self.close_all_app()  # tap to close all app activities
self.d.press('home')
start_time = time.time()
while True:
try:
self.d.app_start(app_info["app_package_name"])
time.sleep(5) # maybe is starting
if self.d.app_current()["package"] == app_info["app_package_name"]:
break
except:
self.logger.error("Start %s error, maybe app is installing, otherwise atx is running."
% app_info["app_alias_name"])
time.sleep(10)
if time.time() - start_time >= 60:  # raise an error if the app has not started within 1 minute
raise Exception("Start %s error, check atx is running or \
app is installing." % app_info["app_alias_name"])
return True
# Example
# Phone(d, u2url='0.0.0.0:7912').detect_start_app(app_info=app_info, start=start)
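#
# A fuller, hedged usage sketch (illustrative only, not part of the original code):
# it assumes this module defines a Phone class wrapping a uiautomator2 device `d`,
# and an app_info dict shaped as documented in detect_start_app() above.
#
# import uiautomator2 as u2
# d = u2.connect('0.0.0.0:7912')
# app_info = {
#     "app_alias_name": "ju_news",
#     "app_package_name": "com.xiangzi.jukandian",
# }
# phone = Phone(d, u2url='0.0.0.0:7912')
# phone.detect_start_app(app_info=app_info, start=True, location=False)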
|
the-stack_106_13870
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLString
class infrared2CameraInfoTopicName(XMLString):
_NAME = 'infrared2CameraInfoTopicName'
_TYPE = 'sdf'
def __init__(self, default='none'):
super().__init__(default)
|
the-stack_106_13872
|
# https://leetcode.com/problems/last-stone-weight/
import heapq
import bisect
class Solution(object):
# Submitted by Jiganesh
# TC : O(N^2 logN) -- the list is re-sorted on every iteration of the loop
# SC : O(1)
def lastStoneWeight(self, stones):
"""
:type stones: List[int]
:rtype: int
"""
while len(stones)>=2:
stones.sort()
y = stones.pop()
x = stones.pop()
if y - x: stones.append(y - x)
if stones:
return stones[-1]
else:
return 0
# Approach 2: Using a Max Heap [Accepted]
def lastStoneWeight(self, A):
h = [-x for x in A]
heapq.heapify(h)
while len(h) > 1 and h[0] != 0:
heapq.heappush(h, heapq.heappop(h) - heapq.heappop(h))
return -h[0]
# Approach 3: Using bisect Sort [Accepted]
def lastStoneWeight(self, stones):
stones.sort()
while len(stones)>1:
x = stones.pop()
y = stones.pop()
if x != y : bisect.insort(stones, x-y)
return stones[0] if stones else 0
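# Minimal usage sketch (added for illustration): runs the last-defined
# lastStoneWeight (the bisect-based variant) on the classic sample input,
# whose expected answer is 1.
if __name__ == "__main__":
    print("lastStoneWeight([2, 7, 4, 1, 8, 1]) =", Solution().lastStoneWeight([2, 7, 4, 1, 8, 1]))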
|
the-stack_106_13876
|
"""
Base configuration management
This file only deals with non-GUI configuration features
(in other words, we won't import any PyQt object here, avoiding any
sip API incompatibility issue in spyder's non-gui modules)
"""
from __future__ import division, absolute_import
import sys
import os
import os.path as osp
import shutil
import platform
STDERR = sys.stderr
# =============================================================================
# Configuration paths
# =============================================================================
def get_home_dir():
"""
Returns user home directory. This will be determined from the first
valid result out of (osp.expanduser('~'), $HOME, $USERPROFILE, $TMP).
"""
try:
# expanduser() returns a raw byte string which needs to be
# decoded with the codec that the OS is using to represent
# file paths.
path = osp.expanduser('~')
except Exception:
path = ''
if osp.isdir(path):
return path
else:
# Get home from alternative locations
for env_var in ('HOME', 'USERPROFILE', 'TMP'):
# os.environ.get() returns a raw byte string which needs to be
# decoded with the codec that the OS is using to represent
# environment variables.
path = os.environ.get(env_var, '')
if osp.isdir(path):
return path
else:
path = ''
if not path:
raise RuntimeError('Please set the environment variable HOME to '
'your user/home directory.')
def get_conf_path(subfolder=None, filename=None, create=True):
"""
Returns the default config path for the platform. This will be:
- macOS: '~/Library/Application Support/<subfolder>/<filename>'
- Linux: 'XDG_CONFIG_HOME/<subfolder>/<filename>'
- other: '~/.config/<subfolder>/<filename>'
:param str subfolder: The subfolder for the app.
:param str filename: The filename to append for the app.
:param bool create: If ``True``, the folder '<subfolder>' will be created on-demand.
"""
# Define conf_dir
if platform.system() == 'Linux':
# This makes us follow the XDG standard to save our settings
# on Linux
xdg_config_home = os.environ.get('XDG_CONFIG_HOME', '')
if not xdg_config_home:
xdg_config_home = osp.join(get_home_dir(), '.config')
if create and not osp.isdir(xdg_config_home):
os.makedirs(xdg_config_home)
conf_dir = osp.join(xdg_config_home, subfolder)
elif platform.system() == 'Darwin':
conf_dir = osp.join(get_home_dir(), 'Library', 'Application Support', subfolder)
else:
conf_dir = osp.join(get_home_dir(), '.config', subfolder)
# Create conf_dir
if create and not osp.isdir(conf_dir):
os.mkdir(conf_dir)
if filename is None:
return conf_dir
else:
return osp.join(conf_dir, filename)
def get_old_conf_path(subfolder=None, filename=None):
"""Return absolute path to the config file with the specified filename."""
# Define conf_dir
conf_dir = osp.join(get_home_dir(), subfolder)
if filename is None:
return conf_dir
else:
return osp.join(conf_dir, filename)
# =============================================================================
# Reset config files
# =============================================================================
def reset_config_files(subfolder, saved_config_files):
"""Remove all config files"""
print("*** Reset settings to defaults ***", file=STDERR)
for fname in saved_config_files:
cfg_fname = get_conf_path(subfolder, fname)
if osp.isfile(cfg_fname) or osp.islink(cfg_fname):
os.remove(cfg_fname)
elif osp.isdir(cfg_fname):
shutil.rmtree(cfg_fname)
else:
continue
print("removing:", cfg_fname, file=STDERR)
# =============================================================================
# Migrate config files
# =============================================================================
# code to migrate from old config file locations to new locations
# config files will be stored in '$XDG_CONFIG_HOME/maestral' in Linux (or
# '~/.config/maestral' if $XDG_CONFIG_HOME is not set) and in '~/Library/Application
# Support/maestral' on macOS.
def migrate_config_files():
import os
import shutil
old_path = get_old_conf_path('.maestral')
new_path = get_conf_path('maestral', create=False)
if os.path.isdir(old_path):
shutil.copytree(old_path, new_path)
shutil.rmtree(old_path)
print("Migrated config files.")
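# Minimal usage sketch (added for illustration; 'maestral' mirrors the subfolder
# used by migrate_config_files above). Resolves paths without creating folders.
if __name__ == '__main__':
    print("home dir :", get_home_dir())
    print("conf path:", get_conf_path('maestral', filename='maestral.ini', create=False))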
|
the-stack_106_13877
|
#!/usr/bin/env python
"""
The JSON import/export module.
This is rudimentary for now, assuming everything is well-formed.
"""
import sys
import odml
import json
try:
unicode = unicode
except NameError:
unicode = str
class OdmlSerializer(object):
"""
Converts the odml class hierarchy to dictionaries and lists
"""
def __init__(self, odml_document):
self.doc = odml_document
@staticmethod
def save_element(e):
"""
Returns a dict representation of the odML object e
"""
fmt = e._format
cur = {'_type': fmt.__class__.__name__}
# Generate elements
for k in fmt._args:
if not hasattr(e, fmt.map(k)):
continue
val = getattr(e, fmt.map(k))
if val is None:
continue
if isinstance(val, list):
for v in val:
ele = OdmlSerializer.save_element(v)
cur.setdefault(k, []).append(ele)
else:
if sys.version_info < (3, 0):
cur[k] = unicode(val)
else:
cur[k] = str(val)
return cur
JSON_VERSION = "1"
class JSONWriter(OdmlSerializer):
def __unicode__(self):
doc = self.save_element(self.doc)
doc['_version'] = JSON_VERSION
return json.dumps(doc)
def __str__(self):
doc = self.save_element(self.doc)
doc['_version'] = JSON_VERSION
return json.dumps(doc)
def write_file(self, filename):
if sys.version_info < (3, 0):
data = unicode(self)
else:
data = str(self)
f = open(filename, "w")
f.write(data)
f.close()
class OdmlReader(object):
"""
Opposite of OdmlSerializer: converts dictionaries representing
odml objects back to their classes
"""
def to_odml(self, obj):
fmt = getattr(odml.format, obj['_type'])
kargs = {}
objects = []
for k in fmt._args:
v = obj.get(k, None)
if isinstance(v, list):
for i, nobj in enumerate(v):
objects.append(self.to_odml(nobj))
elif v is not None:
kargs[fmt.map(k)] = v
return getattr(self, "create_%s" % fmt._name)(fmt, kargs, obj, objects)
def create_odML(self, fmt, kargs, obj, children):
obj = fmt.create(**kargs)
for i in children:
obj.append(i)
return obj
create_section = create_odML
create_value = create_odML
def create_property(self, fmt, kargs, obj, children):
kargs['value'] = children
return self.create_odML(fmt, kargs, obj, [])
class JSONReader(OdmlReader):
def fromString(self, data):
obj = json.loads(data)
return self.to_odml(obj)
def fromFile(self, infile):
return self.fromString(infile.read())
if __name__ == "__main__":
# import sys
y = JSONReader().fromFile(sys.stdin)
import dumper
dumper.dumpDoc(y)
|
the-stack_106_13878
|
# MIT License
#
# Copyright (c) 2017 Ilya Kostrikov and (c) 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
from typing import *
import sys
import time
import numpy as np
import torch
import gym
import my_pybullet_envs
import random
from matplotlib import pyplot as plt
import pickle
import joblib
from third_party.a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs
from third_party.a2c_ppo_acktr.utils import get_render_func, get_vec_normalize
from third_party.a2c_ppo_acktr.arguments import parse_args_with_unknown
from gan.utils import *
def plot_avg_dis_prob(args, avg_reward_list, dxs):
env_name = args.env_name
_, axs = plt.subplots(2, 1)
axs[0].plot(avg_reward_list)
# plt.title('Average Dis Reward, Env: {}'.format(env_name))
plt.xlabel('steps')
# plt.ylabel('average reward')
axs[1].plot(dxs)
plt.show()
np.save(os.path.join('./imgs', env_name + '_avg_dreward.npy'), np.array(avg_reward_list))
plt.savefig(os.path.join('./imgs', env_name + '_avg_dreward.png'))
input("press enter plt")
def plot_avg_dis_prob_2(args, avg_reward_list, avg_reward_list_2, dxs):
env_name = args.env_name
_, axs = plt.subplots(2, 1)
axs[0].plot(avg_reward_list)
axs[0].plot(avg_reward_list_2)
# plt.title('Average Dis Reward, Env: {}'.format(env_name))
plt.xlabel('steps')
# plt.ylabel('average reward')
axs[1].plot(dxs)
plt.show()
np.save(os.path.join('./imgs', env_name + '_avg_dreward.npy'), np.array(avg_reward_list))
plt.savefig(os.path.join('./imgs', env_name + '_avg_dreward.png'))
input("press enter plt")
sys.path.append("third_party")
parser = argparse.ArgumentParser(description="RL")
parser.add_argument(
"--seed", type=int, default=1, help="random seed (default: 1)"
)
parser.add_argument(
"--env-name",
default="HumanoidSwimmerEnv-v1",
help="environment to load and test on",
)
parser.add_argument(
"--src-env-name",
default="",
help='environment to transfer policy from ("" if same as test env)',
)
parser.add_argument(
"--load-dir",
default="./trained_models/",
help="directory to save agent logs (default: ./trained_models/)",
)
parser.add_argument(
"--save-traj",
type=int,
default=0,
help="whether to save traj tuples",
)
parser.add_argument(
"--num-trajs",
type=int,
default=200,
help="how many trajs to rollout/store",
)
parser.add_argument(
"--save-path",
default="./tmp.pkl",
help="where the traj tuples are stored",
)
# parser.add_argument(
# "--load-dis",
# type=int,
# default=0,
# help="whether to load gail discriminator for debugging",
# )
# parser.add_argument(
# "--enlarge-act-range",
# type=float,
# default=0.15,
# help="add white noise to action during rollout",
# )
parser.add_argument(
"--non-det",
type=int,
default=0,
help="whether to use a non-deterministic policy, 1 true 0 false",
)
parser.add_argument(
"--iter",
type=int,
default=None,
help="which iter pi to test"
)
parser.add_argument(
"--r-thres",
type=int,
default=4000,
help="The threshold reward value above which it is considered a success.",
)
args, extra_dict = parse_args_with_unknown(parser)
np.set_printoptions(precision=2, suppress=None, threshold=sys.maxsize)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
is_cuda = False
device = "cuda" if is_cuda else "cpu"
args.det = not args.non_det
# If render is provided, use that. Otherwise, turn it on.
if "render" not in extra_dict:
extra_dict["render"] = True
env = make_vec_envs(
args.env_name,
args.seed + 1000,
1,
None,
None,
device=device,
allow_early_resets=False,
**extra_dict,
)
# dont know why there are so many wrappers in make_vec_envs...
env_core = env.venv.venv.envs[0].env.env
if args.src_env_name == "":
env_name_transfer = args.env_name
else:
env_name_transfer = args.src_env_name
actor_critic, ob_rms, recurrent_hidden_states, masks \
= load(args.load_dir, env_name_transfer, is_cuda, args.iter)
# discri = None
# if args.load_dis:
# discri = load_gail_discriminator(args.load_dir, env_name_transfer, is_cuda, args.iter)
if ob_rms:
print(ob_rms.mean)
print(ob_rms.var)
print(ob_rms.count)
input("ob_rms")
vec_norm = get_vec_normalize(env)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
all_trajs = {}
cur_traj = []
cur_traj_idx = 0
obs = env.reset()
# print("obs", obs)
# input("reset, press enter")
done = False
reward_total = 0
list_rewards = []
list_traj_lengths = []
list_r_per_step = []
dist = 0
last_dist = 0
dis_probs_imaginary = None
dis_probs_real = None
dxs = []
# if args.load_dis:
# dis_probs_imaginary = []
# dis_probs_real = []
while True:
# try:
# env_core.reset_counter = 5000
# except:
# pass
with torch.no_grad():
# value, action, _, recurrent_hidden_states = actor_critic.act(
# obs, recurrent_hidden_states, masks, deterministic=True
# )
# action += torch.normal(torch.zeros(action.size()), 0.1).to(device)
# print(action)
value, action, _, recurrent_hidden_states = actor_critic.act(
obs, recurrent_hidden_states, masks, deterministic=args.det
)
# # TODO, name duplicate
# # TODO parameter space noise
# # xx% noise before tanh
# action += (torch.rand(action.size()).to(device) - 0.5) * (args.enlarge_act_range * 2)
# # print(action)
# if args.save_traj:
# tuple_sas = []
# obs_feat = replace_obs_with_feat(obs, is_cuda, feat_select_func, return_tensor=False)
# tuple_sas.append(obs_feat[0]) # only one process env
#
# # save clamped action (note: dyn envs might have action larger than 1)
# action = action.clamp(-1., 1)
# print("obs", obs)
# print("act", torch.tanh(action))
# if args.load_dis:
# obs_feat = replace_obs_with_feat(obs, is_cuda, feat_select_func, return_tensor=True)
# dis_state = torch.cat((obs_feat, obs[:, env_core.behavior_obs_len:]), 1)
# Observe reward and next obs
obs, reward, done, info = env.step(action)
list_r_per_step.append(reward)
if args.save_traj:
sas_window = info[0]["sas_window"] # info[0] because 1-core dummy vec env.
# tuple_sas.append(list(unwrap(action, is_cuda=is_cuda)))
#
# obs_feat = replace_obs_with_feat(obs, is_cuda, feat_select_func, return_tensor=False)
# tuple_sas.append(obs_feat[0])
next_obs = list(unwrap(obs, is_cuda=is_cuda))
# print(sas_window)
cur_traj.append(sas_window)
# if args.load_dis:
# dis_action = replace_obs_with_feat(obs, is_cuda, feat_select_func, return_tensor=True)
# dis_r = discri.predict_prob_single_step(dis_state, dis_action)
# dis_probs_real.append(unwrap(dis_r, is_cuda=is_cuda))
# if len(dis_probs_real)>20 and np.mean(dis_probs_real[-20:]) < 0.4:
# done = True
# env.reset()
# try:
# obs_i = env_core.return_imaginary_obs()
# dis_action = obs_i[:env_core.behavior_obs_len] # dis action is next state
# dis_action = wrap(dis_action, is_cuda=is_cuda)
# dis_action = replace_obs_with_feat(dis_action, is_cuda, feat_select_func, return_tensor=True)
# dis_r = discri.predict_prob_single_step(dis_state, dis_action)
# dis_probs_imaginary.append(unwrap(dis_r, is_cuda=is_cuda))
# except:
# pass
# dxs.append(env_core.get_ave_dx())
try:
env_core.cam_track_torso_link()
last_dist = dist
dist = env_core.get_dist()
except:
print("not bullet locomotion env")
reward_total += reward.cpu().numpy()[0][0]
if done:
list_rewards.append(reward_total)
list_traj_lengths.append(len(list_r_per_step))
print(
f"{args.load_dir}\t"
f"tr: {reward_total:.1f}\t"
f"x: {last_dist:.2f}\t"
f"tr_ave: {reward_total/len(list_r_per_step):.2f}\t"
f"total_per_step_r_ave: {np.sum(list_rewards)/np.sum(list_traj_lengths):.2f}\t"
)
reward_total = 0.0
# env_core.reset_counter = 0
cur_traj_idx += 1
if cur_traj_idx >= args.num_trajs:
break
if args.save_traj:
print(np.array(cur_traj).shape)
all_trajs[cur_traj_idx] = cur_traj
cur_traj = []
# if args.load_dis:
# print(
# f"{np.array(dis_probs_real).mean()}\t"
# )
# # plot_avg_dis_prob_2(args, dis_probs_imaginary, dis_probs_real, list_r_per_step)
# dis_probs_imaginary = []
# dis_probs_real = []
# else:
# # plot_avg_dis_prob(args, list_r_per_step, dxs)
# pass
list_r_per_step = []
dxs = []
masks.fill_(0.0 if done else 1.0)
with open(args.save_path, "wb") as handle:
# print(all_trajs)
pickle.dump(all_trajs, handle, protocol=pickle.HIGHEST_PROTOCOL)
# joblib.dump(all_trajs, handle)
bins_list = np.arange(40) * 50.0
print(bins_list)
plt.hist(list_rewards, alpha=0.5, label='r hist', bins=bins_list)
plt.legend(loc='upper right')
plt.show()
|
the-stack_106_13879
|
from system_baseline.exceptions import FactValidationError
FACTS_MAXSIZE = 2 ** 20 # 1 MB
def check_for_duplicate_names(facts):
"""
check if any names are duplicated; raises an exception if duplicates are found.
"""
names = []
for fact in facts:
names.append(fact["name"])
if "values" in fact:
check_for_duplicate_names(fact["values"])
for name in names:
if names.count(name) > 1:
raise FactValidationError("name %s declared more than once" % name)
def check_for_value_values(facts):
"""
check if any fields have "value" and "values" both defined
"""
for fact in facts:
if "values" in fact and "value" in fact:
raise FactValidationError("fact %s cannot have value and values defined" % fact["name"])
elif "values" in fact:
check_for_value_values(fact["values"])
def check_for_empty_name_values(facts):
"""
check if any names or values are empty; raises an exception if an empty one is found.
"""
for fact in facts:
if "values" in fact:
check_for_empty_name_values(fact["values"])
if "name" in fact and not fact["name"]:
raise FactValidationError("fact name cannot be empty")
elif "value" in fact and not fact["value"]:
raise FactValidationError("value for %s cannot be empty" % fact["name"])
def check_for_invalid_whitespace_name_values(facts):
"""
check if any names or values have leading or trailing whitespace; raises an exception if found.
"""
for fact in facts:
if "values" in fact:
check_for_invalid_whitespace_name_values(fact["values"])
if "name" in fact and not check_whitespace(fact["name"]):
raise FactValidationError("fact name cannot have leading or trailing whitespace")
elif "value" in fact:
if not isinstance(fact["value"], list):
if not check_whitespace(fact["value"]):
raise FactValidationError(
"value for %s cannot have leading or trailing whitespace" % fact["name"]
)
def check_whitespace(input_string):
"""
returns true if there is no leading or trailing whitespace, otherwise returns false.
"""
if input_string == input_string.lstrip() == input_string.rstrip():
return True
return False
def check_facts_length(facts):
"""
check if fact length is greater than FACTS_MAXSIZE
"""
if len(str(facts)) > FACTS_MAXSIZE:
raise FactValidationError("attempted to save fact list over %s bytes" % FACTS_MAXSIZE)
def check_name_value_length(facts):
"""
check the following lengths:
* name is over 500 char
* value is over 1000 char
"""
for fact in facts:
if "values" in fact:
check_name_value_length(fact["values"])
if "name" in fact and len(fact["name"]) > 500:
raise FactValidationError("fact name %s is over 500 characters" % fact["name"])
elif "value" in fact and len(fact["value"]) > 1000:
raise FactValidationError("value %s is over 1000 characters" % fact["value"])
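# Minimal usage sketch (added for illustration; assumes the system_baseline
# package is importable so FactValidationError resolves). The validators raise
# FactValidationError on bad input and return None otherwise.
if __name__ == "__main__":
    sample_facts = [
        {"name": "arch", "value": "x86_64"},
        {"name": "fqdn", "value": "host.example.com"},
    ]
    check_for_duplicate_names(sample_facts)
    check_for_empty_name_values(sample_facts)
    check_name_value_length(sample_facts)
    try:
        check_for_duplicate_names(sample_facts + [{"name": "arch", "value": "aarch64"}])
    except FactValidationError as error:
        print("rejected:", error)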
|
the-stack_106_13880
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Common utility functions for command line commands."""
# pylint: disable=import-error
import os
import sys
import click
from tabulate import tabulate
def get_env_with_venv_bin():
"""Create a clone of the current running environment with the AIIDA_PATH variable set to the directory of the config."""
from aiida.manage.configuration import get_config
config = get_config()
currenv = os.environ.copy()
currenv['PATH'] = f"{os.path.dirname(sys.executable)}:{currenv['PATH']}"
currenv['AIIDA_PATH'] = config.dirpath
currenv['PYTHONUNBUFFERED'] = 'True'
return currenv
def format_local_time(timestamp, format_str='%Y-%m-%d %H:%M:%S'):
"""
Format a datetime object or UNIX timestamp in a human readable format
:param timestamp: a datetime object or a float representing a UNIX timestamp
:param format_str: optional string format to pass to strftime
"""
from aiida.common import timezone
if isinstance(timestamp, float):
return timezone.datetime.fromtimestamp(timestamp).strftime(format_str)
return timestamp.strftime(format_str)
def print_last_process_state_change(process_type=None):
"""
Print the last time that a process of the specified type has changed its state.
This function will also print a warning if the daemon is not running.
:param process_type: optional process type for which to get the latest state change timestamp.
Valid process types are either 'calculation' or 'work'.
"""
from aiida.cmdline.utils.echo import echo_info, echo_warning
from aiida.common import timezone
from aiida.common.utils import str_timedelta
from aiida.engine.daemon.client import get_daemon_client
from aiida.engine.utils import get_process_state_change_timestamp
client = get_daemon_client()
timestamp = get_process_state_change_timestamp(process_type)
if timestamp is None:
echo_info('last time an entry changed state: never')
else:
timedelta = timezone.delta(timestamp, timezone.now())
formatted = format_local_time(timestamp, format_str='at %H:%M:%S on %Y-%m-%d')
relative = str_timedelta(timedelta, negative_to_zero=True, max_num_fields=1)
echo_info(f'last time an entry changed state: {relative} ({formatted})')
if not client.is_daemon_running:
echo_warning('the daemon is not running', bold=True)
def get_node_summary(node):
"""Return a multi line string with a pretty formatted summary of a Node.
:param node: a Node instance
:return: a string summary of the node
"""
from plumpy import ProcessState
from aiida.orm import ProcessNode
table_headers = ['Property', 'Value']
table = []
if isinstance(node, ProcessNode):
table.append(['type', node.process_label])
try:
process_state = ProcessState(node.process_state)
except (AttributeError, ValueError):
pass
else:
process_state_string = process_state.value.capitalize()
if process_state == ProcessState.FINISHED and node.exit_message:
table.append(['state', f'{process_state_string} [{node.exit_status}] {node.exit_message}'])
elif process_state == ProcessState.FINISHED:
table.append(['state', f'{process_state_string} [{node.exit_status}]'])
elif process_state == ProcessState.EXCEPTED:
table.append(['state', f'{process_state_string} <{node.exception}>'])
else:
table.append(['state', process_state_string])
else:
table.append(['type', node.__class__.__name__])
table.append(['pk', str(node.pk)])
table.append(['uuid', str(node.uuid)])
table.append(['label', node.label])
table.append(['description', node.description])
table.append(['ctime', node.ctime])
table.append(['mtime', node.mtime])
try:
computer = node.computer
except AttributeError:
pass
else:
if computer is not None:
table.append(['computer', f'[{node.computer.pk}] {node.computer.label}'])
return tabulate(table, headers=table_headers)
def get_node_info(node, include_summary=True):
"""Return a multi line string of information about the given node, such as the incoming and outgoing links.
:param include_summary: boolean, if True, also include a summary of node properties
:return: a string summary of the node including a description of all its links and log messages
"""
from aiida.common.links import LinkType
from aiida import orm
if include_summary:
result = get_node_summary(node)
else:
result = ''
nodes_caller = node.get_incoming(link_type=(LinkType.CALL_CALC, LinkType.CALL_WORK))
nodes_called = node.get_outgoing(link_type=(LinkType.CALL_CALC, LinkType.CALL_WORK))
nodes_input = node.get_incoming(link_type=(LinkType.INPUT_CALC, LinkType.INPUT_WORK))
nodes_output = node.get_outgoing(link_type=(LinkType.CREATE, LinkType.RETURN))
if nodes_input:
result += f"\n{format_nested_links(nodes_input.nested(), headers=['Inputs', 'PK', 'Type'])}"
if nodes_output:
result += f"\n{format_nested_links(nodes_output.nested(), headers=['Outputs', 'PK', 'Type'])}"
if nodes_caller:
links = sorted(nodes_caller.all(), key=lambda x: x.node.ctime)
result += f"\n{format_flat_links(links, headers=['Caller', 'PK', 'Type'])}"
if nodes_called:
links = sorted(nodes_called.all(), key=lambda x: x.node.ctime)
result += f"\n{format_flat_links(links, headers=['Called', 'PK', 'Type'])}"
log_messages = orm.Log.objects.get_logs_for(node)
if log_messages:
table = []
table_headers = ['Log messages']
table.append([f'There are {len(log_messages)} log messages for this calculation'])
table.append([f"Run 'verdi process report {node.pk}' to see them"])
result += f'\n\n{tabulate(table, headers=table_headers)}'
return result
def format_flat_links(links, headers):
"""Given a flat list of LinkTriples, return a flat string representation.
:param links: a list of LinkTriples
:param headers: headers to use
:return: formatted string
"""
table = []
for link_triple in links:
table.append([link_triple.link_label, link_triple.node.pk, link_triple.node.get_attribute('process_label', '')])
result = f'\n{tabulate(table, headers=headers)}'
return result
def format_nested_links(links, headers):
"""Given a nested dictionary of nodes, return a nested string representation.
:param links: a nested dictionary of nodes
:param headers: headers to use
:return: nested formatted string
"""
import collections.abc
import tabulate as tb
tb.PRESERVE_WHITESPACE = True
indent_size = 4
def format_recursive(links, depth=0):
"""Recursively format a dictionary of nodes into indented strings."""
rows = []
for label, value in links.items():
if isinstance(value, collections.abc.Mapping):  # collections.Mapping alias was removed in Python 3.10
rows.append([depth, label, '', ''])
rows.extend(format_recursive(value, depth=depth + 1))
else:
rows.append([depth, label, value.pk, value.__class__.__name__])
return rows
table = []
for depth, label, pk, class_name in format_recursive(links):
table.append([f"{' ' * (depth * indent_size)}{label}", pk, class_name])
result = f'\n{tabulate(table, headers=headers)}'
tb.PRESERVE_WHITESPACE = False
return result
def get_calcjob_report(calcjob):
"""
Return a multi line string representation of the log messages and output of a given calcjob
:param calcjob: the calcjob node
:return: a string representation of the log messages and scheduler output
"""
from aiida import orm
from aiida.common.datastructures import CalcJobState
log_messages = orm.Log.objects.get_logs_for(calcjob)
scheduler_out = calcjob.get_scheduler_stdout()
scheduler_err = calcjob.get_scheduler_stderr()
calcjob_state = calcjob.get_state()
scheduler_state = calcjob.get_scheduler_state()
report = []
if calcjob_state == CalcJobState.WITHSCHEDULER:
state_string = f"{calcjob_state}, scheduler state: {scheduler_state if scheduler_state else '(unknown)'}"
else:
state_string = f'{calcjob_state}'
label_string = f' [{calcjob.label}]' if calcjob.label else ''
report.append(f'*** {calcjob.pk}{label_string}: {state_string}')
if scheduler_out is None:
report.append('*** Scheduler output: N/A')
elif scheduler_out:
report.append(f'*** Scheduler output:\n{scheduler_out}')
else:
report.append('*** (empty scheduler output file)')
if scheduler_err is None:
report.append('*** Scheduler errors: N/A')
elif scheduler_err:
report.append(f'*** Scheduler errors:\n{scheduler_err}')
else:
report.append('*** (empty scheduler errors file)')
if log_messages:
report.append(f'*** {len(log_messages)} LOG MESSAGES:')
else:
report.append('*** 0 LOG MESSAGES')
for log in log_messages:
report.append(f'+-> {log.levelname} at {log.time}')
for message in log.message.splitlines():
report.append(f' | {message}')
return '\n'.join(report)
def get_process_function_report(node):
"""
Return a multi line string representation of the log messages and output of a given process function node
:param node: the node
:return: a string representation of the log messages
"""
from aiida import orm
report = []
for log in orm.Log.objects.get_logs_for(node):
report.append(f'{log.time:%Y-%m-%d %H:%M:%S} [{log.id}]: {log.message}')
return '\n'.join(report)
def get_workchain_report(node, levelname, indent_size=4, max_depth=None):
"""
Return a multi line string representation of the log messages and output of a given workchain
:param node: the workchain node
:return: a nested string representation of the log messages
"""
# pylint: disable=too-many-locals
import itertools
from aiida import orm
from aiida.common.log import LOG_LEVELS
def get_report_messages(uuid, depth, levelname):
"""Return list of log messages with given levelname and their depth for a node with a given uuid."""
node_id = orm.load_node(uuid).id
filters = {'dbnode_id': node_id}
entries = orm.Log.objects.find(filters)
entries = [entry for entry in entries if LOG_LEVELS[entry.levelname] >= LOG_LEVELS[levelname]]
return [(_, depth) for _ in entries]
def get_subtree(uuid, level=0):
"""
Get a nested tree of work calculation nodes and their nesting level starting from this uuid.
The result is a list of uuid of these nodes.
"""
builder = orm.QueryBuilder()
builder.append(cls=orm.WorkChainNode, filters={'uuid': uuid}, tag='workcalculation')
builder.append(
cls=orm.WorkChainNode,
project=['uuid'],
# In the future, we should specify here the type of link
# for now, CALL links are the only ones allowing calc-calc
# (we here really want instead to follow CALL links)
with_incoming='workcalculation',
tag='subworkchains'
)
result = builder.all(flat=True)
# This will return a single flat list of tuples, where the first element
# corresponds to the WorkChain pk and the second element is an integer
# that represents its level of nesting within the chain
return [(uuid, level)] + list(itertools.chain(*[get_subtree(subuuid, level=level + 1) for subuuid in result]))
workchain_tree = get_subtree(node.uuid)
if max_depth:
report_list = [
get_report_messages(uuid, depth, levelname) for uuid, depth in workchain_tree if depth < max_depth
]
else:
report_list = [get_report_messages(uuid, depth, levelname) for uuid, depth in workchain_tree]
reports = list(itertools.chain(*report_list))
reports.sort(key=lambda r: r[0].time)
if not reports:
return 'No log messages recorded for this entry'
log_ids = [entry[0].id for entry in reports]
levelnames = [len(entry[0].levelname) for entry in reports]
width_id = len(str(max(log_ids)))
width_levelname = max(levelnames)
report = []
for entry, depth in reports:
line = '{time:%Y-%m-%d %H:%M:%S} [{id:<{width_id}} | {levelname:>{width_levelname}}]:{indent} {message}'.format(
id=entry.id,
levelname=entry.levelname,
message=entry.message,
time=entry.time,
width_id=width_id,
width_levelname=width_levelname,
indent=' ' * (depth * indent_size)
)
report.append(line)
return '\n'.join(report)
def print_process_info(process):
"""Print detailed information about a process class and its process specification.
:param process: a :py:class:`~aiida.engine.processes.process.Process` class
"""
docstring = process.__doc__
if docstring is not None:
docstring = docstring.strip().split('\n')
if not docstring:
docstring = ['No description available']
click.secho('Description:\n', fg='red', bold=True)
for line in docstring:
click.echo(f' {line.lstrip()}')
click.echo()
print_process_spec(process.spec())
def print_process_spec(process_spec):
"""Print the process spec in a human-readable formatted way.
:param process_spec: a `ProcessSpec` instance
"""
def build_entries(ports):
"""Build a list of entries to be printed for a `PortNamespace.
:param ports: the port namespace
:return: list of tuples with port name, required, valid types and info strings
"""
result = []
for name, port in sorted(ports.items(), key=lambda x: (not x[1].required, x[0])):
if name.startswith('_'):
continue
valid_types = port.valid_type if isinstance(port.valid_type, (list, tuple)) else (port.valid_type,)
valid_types = ', '.join([valid_type.__name__ for valid_type in valid_types if valid_type is not None])
required = 'required' if port.required else 'optional'
info = port.help if port.help is not None else ''
info = f'{info[:75]} ...' if len(info) > 75 else info
result.append([name, required, valid_types, info])
return result
template = '{:>{width_name}s}: {:10s}{:{width_type}}{}'
inputs = build_entries(process_spec.inputs)
outputs = build_entries(process_spec.outputs)
max_width_name = max([len(entry[0]) for entry in inputs + outputs]) + 2
max_width_type = max([len(entry[2]) for entry in inputs + outputs]) + 2
if process_spec.inputs:
click.secho('Inputs:', fg='red', bold=True)
for entry in inputs:
if entry[1] == 'required':
click.secho(template.format(*entry, width_name=max_width_name, width_type=max_width_type), bold=True)
else:
click.secho(template.format(*entry, width_name=max_width_name, width_type=max_width_type))
if process_spec.outputs:
click.secho('Outputs:', fg='red', bold=True)
for entry in outputs:
if entry[1] == 'required':
click.secho(template.format(*entry, width_name=max_width_name, width_type=max_width_type), bold=True)
else:
click.secho(template.format(*entry, width_name=max_width_name, width_type=max_width_type))
if process_spec.exit_codes:
click.secho('Exit codes:', fg='red', bold=True)
for exit_code in sorted(process_spec.exit_codes.values(), key=lambda exit_code: exit_code.status):
message = exit_code.message.capitalize()
click.secho('{:>{width_name}d}: {}'.format(exit_code.status, message, width_name=max_width_name))
def get_num_workers():
"""
Get the number of active daemon workers from the circus client
"""
from aiida.common.exceptions import CircusCallError
from aiida.manage.manager import get_manager
manager = get_manager()
client = manager.get_daemon_client()
if client.is_daemon_running:
response = client.get_numprocesses()
if response['status'] != 'ok':
if response['status'] == client.DAEMON_ERROR_TIMEOUT:
raise CircusCallError('verdi thought the daemon was alive, but the call to the daemon timed-out')
elif response['status'] == client.DAEMON_ERROR_NOT_RUNNING:
raise CircusCallError('verdi thought the daemon was running, but really it is not')
else:
raise CircusCallError
try:
return response['numprocesses']
except KeyError:
raise CircusCallError('Circus did not return the number of daemon processes')
def check_worker_load(active_slots):
"""
Check if the percentage usage of the daemon worker slots exceeds a threshold.
If it does, print a warning.
The purpose of this check is to warn the user if they are close to running out of worker slots
which could lead to their processes becoming stuck indefinitely.
:param active_slots: the number of currently active worker slots
"""
from aiida.cmdline.utils import echo
from aiida.common.exceptions import CircusCallError
from aiida.manage.configuration import get_config
warning_threshold = 0.9 # 90%
config = get_config()
slots_per_worker = config.get_option('daemon.worker_process_slots', config.current_profile.name)
try:
active_workers = get_num_workers()
except CircusCallError:
echo.echo_critical('Could not contact Circus to get the number of active workers')
if active_workers is not None:
available_slots = active_workers * slots_per_worker
percent_load = (active_slots / available_slots)
if percent_load > warning_threshold:
echo.echo('') # New line
echo.echo_warning(f'{percent_load * 100:.0f}% of the available daemon worker slots have been used!')
echo.echo_warning("Increase the number of workers with 'verdi daemon incr'.\n")
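# Minimal usage sketch (added for illustration; assumes an AiiDA installation so
# the lazy `from aiida.common import timezone` import inside format_local_time
# resolves). Formats the current UNIX timestamp in local time.
if __name__ == '__main__':
    import time
    print(format_local_time(time.time()))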
|
the-stack_106_13883
|
# -*- coding: utf-8 -*-
"""
Shows sample call to webservice (easyborrow v2 compatible call).
Example openurls:
- article: 'rft.jtitle=Facial plastic surgery : FPS&rft.atitle=Anatomy for blepharoplasty and brow-lift.&rft.pages=177-85&rft.date=2010&rft.volume=26&rft.end_page=85&ctx_ver=Z39.88-2004&rft.genre=article'
- book: 'sid=FirstSearch%3AWorldCat&genre=book&isbn=9780688002305&title=Zen+and+the+art+of+motorcycle+maintenance%3A+an+inquiry+into+values%2C&date=1974&aulast=Pirsig&aufirst=Robert&auinitm=M&id=doi%3A&pid=673595%3Cfssessid%3E0%3C%2Ffssessid%3E&url_ver=Z39.88-2004&rfr_id=info%3Asid%2Ffirstsearch.oclc.org%3AWorldCat&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook&rft.genre=book&req_dat=%3Csessionid%3E0%3C%2Fsessionid%3E&rfe_dat=%3Caccessionnumber%3E673595%3C%2Faccessionnumber%3E&rft_id=info%3Aoclcnum%2F673595&rft_id=urn%3AISBN%3A9780688002305&rft.aulast=Pirsig&rft.aufirst=Robert&rft.auinitm=M&rft.btitle=Zen+and+the+art+of+motorcycle+maintenance%3A+an+inquiry+into+values%2C&rft.date=1974&rft.isbn=9780688002305&rft.place=New+York&rft.pub=Morrow&rft.genre=book&checksum=8bf1504d891b0a2551ab879c3a555a8c&title=Brown University&linktype=openurl&detail=RBN'
Example response:
{ "status": "submission_successful", "transaction_number": "2468" }
"""
import os, pprint, random
import requests
## settings
API_URL = os.environ['ILLIAD_WS_SAMPLE_SCRIPT__API_URL']
API_AUTH_KEY = os.environ['ILLIAD_WS_SAMPLE_SCRIPT__API_AUTH_KEY']
USERNAME = os.environ['ILLIAD_WS_SAMPLE_SCRIPT__TEST_USERNAME']
OPENURL = os.environ['ILLIAD_WS_SAMPLE_SCRIPT__TEST_OPENURL']
REQUEST_ID = random.randint( 1111, 9999 ) # used to easily see web-service log entries for a single request
## hit api
params = {
'auth_key': API_AUTH_KEY,
'username':USERNAME,
'openurl': OPENURL,
'request_id': REQUEST_ID
}
r = requests.post( API_URL, data=params )
## view response
pprint.pprint( r.content )
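## parse response (added illustration; assumes the service replies with the JSON
## shape documented above, e.g. {"status": "submission_successful", "transaction_number": "2468"})
try:
    response_data = r.json()
    print( 'status: %s' % response_data.get('status') )
    print( 'transaction_number: %s' % response_data.get('transaction_number') )
except ValueError:
    print( 'response was not valid JSON' )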
|
the-stack_106_13885
|
from django.conf import settings
from django.utils.module_loading import import_string
from django.views.generic.list import MultipleObjectMixin
from oscar.core.loading import get_class, get_model
BrowseCategoryForm = get_class('search.forms', 'BrowseCategoryForm')
SearchHandler = get_class('search.search_handlers', 'SearchHandler')
is_solr_supported = get_class('search.features', 'is_solr_supported')
is_elasticsearch_supported = get_class('search.features', 'is_elasticsearch_supported')
Product = get_model('catalogue', 'Product')
def get_product_search_handler_class():
"""
Determine the search handler to use.
Currently only Solr is supported as a search backend, so it falls
back to rudimentary category browsing if that isn't enabled.
"""
# Use get_class to ensure overridability
if settings.OSCAR_PRODUCT_SEARCH_HANDLER is not None:
return import_string(settings.OSCAR_PRODUCT_SEARCH_HANDLER)
if is_solr_supported():
return get_class('catalogue.search_handlers', 'SolrProductSearchHandler')
elif is_elasticsearch_supported():
return get_class(
'catalogue.search_handlers', 'ESProductSearchHandler',
)
else:
return get_class(
'catalogue.search_handlers', 'SimpleProductSearchHandler')
class SolrProductSearchHandler(SearchHandler):
"""
Search handler specialised for searching products. Comes with optional
category filtering. To be used with a Solr search backend.
"""
form_class = BrowseCategoryForm
model_whitelist = [Product]
paginate_by = settings.OSCAR_PRODUCTS_PER_PAGE
def __init__(self, request_data, full_path, categories=None):
self.categories = categories
super(SolrProductSearchHandler, self).__init__(request_data, full_path)
def get_search_queryset(self):
sqs = super(SolrProductSearchHandler, self).get_search_queryset()
if self.categories:
# We use 'narrow' API to ensure Solr's 'fq' filtering is used as
# opposed to filtering using 'q'.
pattern = ' OR '.join([
'"%s"' % c.full_name for c in self.categories])
sqs = sqs.narrow('category_exact:(%s)' % pattern)
return sqs
class ESProductSearchHandler(SearchHandler):
"""
Search handler specialised for searching products. Comes with optional
category filtering. To be used with an ElasticSearch search backend.
"""
form_class = BrowseCategoryForm
model_whitelist = [Product]
paginate_by = settings.OSCAR_PRODUCTS_PER_PAGE
def __init__(self, request_data, full_path, categories=None):
self.categories = categories
super(ESProductSearchHandler, self).__init__(request_data, full_path)
def get_search_queryset(self):
sqs = super(ESProductSearchHandler, self).get_search_queryset()
if self.categories:
for category in self.categories:
sqs = sqs.filter_or(category=category.full_name)
return sqs
class SimpleProductSearchHandler(MultipleObjectMixin):
"""
A basic implementation of the full-featured SearchHandler that has no
faceting support, but doesn't require a Haystack backend. It only
supports category browsing.
Note that this is meant as a replacement search handler and not as a view
mixin; the mixin just does most of what we need it to do.
"""
paginate_by = settings.OSCAR_PRODUCTS_PER_PAGE
def __init__(self, request_data, full_path, categories=None):
self.categories = categories
self.kwargs = {'page': request_data.get('page', 1)}
self.object_list = self.get_queryset()
def get_queryset(self):
qs = Product.browsable.base_queryset()
if self.categories:
qs = qs.filter(categories__in=self.categories).distinct()
return qs
def get_search_context_data(self, context_object_name):
# Set the context_object_name instance property as it's needed
# internally by MultipleObjectMixin
self.context_object_name = context_object_name
context = self.get_context_data(object_list=self.object_list)
context[context_object_name] = context['page_obj'].object_list
return context
|
the-stack_106_13887
|
from abs import deeppoly
import matplotlib.pyplot as plt
import numpy as np
def plot_deeppoly(ele: deeppoly.Ele,
x_lim=(-1, 1),
y_lim=(-1, 1),
fig: plt.Figure = None):
lb = ele.lb().detach().numpy().squeeze()
ub = ele.ub().detach().numpy().squeeze()
step_size = 0.01
assert len(lb) == 2
h_bound_x = np.arange(lb[0], ub[0], step_size)
h_bound_y1 = np.full_like(h_bound_x, lb[1])
h_bound_y2 = np.full_like(h_bound_x, ub[1])
v_bound_y = np.arange(lb[1], ub[1], step_size)
v_bound_x1 = np.full_like(v_bound_y, lb[0])
v_bound_x2 = np.full_like(v_bound_y, ub[0])
if fig is None:
fig, ax = plt.subplots()
else:
ax = fig.get_axes()[0]
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
ax.grid(True)
ax.plot(h_bound_x, h_bound_y1, "r-", alpha=0.5)
ax.plot(h_bound_x, h_bound_y2, "r-", alpha=0.5)
ax.plot(v_bound_x1, v_bound_y, "r-", alpha=0.5)
ax.plot(v_bound_x2, v_bound_y, "r-", alpha=0.5)
ax.fill_between(h_bound_x, h_bound_y1, h_bound_y2, alpha=0.5)
return fig, ax
|
the-stack_106_13889
|
import numpy as np
from scipy.special import spherical_jn
from scipy.misc import derivative
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from scipy.integrate import quadrature
import sys
sys.path.append('../utils/')
from tools import loginterp
j0 = lambda x: spherical_jn(0, x)
#Get model as parameter
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', help='model name to use', default='ModelA')
parser.add_argument('-s', '--size', help='for small or big box', default='big')
args = parser.parse_args()
print(args, args.model)
model = args.model
boxsize = args.size
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]
suff = 'm1_00p3mh-alpha-0p8-subvol'
if boxsize == 'big':
suff = suff + '-big'
bs = 1024
else: bs = 256
outfolder = '../data/outputs/' + suff[:]
outfolder += "/%s/"%model
#
def get_ilpk(aa):
'''return log-interpolated pk at redshift z(aa) for Hankel transform'''
pkd = np.loadtxt(outfolder + "HI_bias_{:06.4f}.txt".format(aa))[1:,:]
kk, pk = pkd[:, 0], pkd[:, 2]**2 * pkd[:, 3]
#on large scales, extend with b^2*P_lin
klin, plin = np.loadtxt('../data/pk_Planck2018BAO_matterpower_z000.dat', unpack=True)
ipklin = ius(klin, plin)
kt = np.concatenate((klin[(klin < kk[0])], kk))
pt = np.concatenate((plin[(klin < kk[0])]*pk[0]/ipklin(kk[0]), pk))
#On small scales, truncate at k=1
pt = pt[kt<1]
kt = kt[kt<1]
ilpk = loginterp(kt, pt)
return ilpk
def xij0f(k, r, ipk):
'''return integrand for hankel transform with j0'''
return k**3*ipk(k)/2/np.pi**2 *j0(k*r)/k
if __name__=="__main__":
r = np.linspace(1, 120, 5)
for iz, aa in enumerate(alist):
zz = 1/aa-1
print('For redshift : z = ', zz)
xi = np.zeros_like(r)
ilpk = get_ilpk(aa)
for i in range(r.size):
if i%1 == 0: print(i, r[i])
f = lambda k: xij0f(k, r[i], ilpk)
xi[i] = quadrature(f, 1e-5, 1e2, maxiter=1000)[0]
np.savetxt(outfolder + "HI_hankelxi_{:06.4f}.txt".format(aa), np.stack((r, xi)).T, header='r, xi')
|
the-stack_106_13890
|
from __future__ import absolute_import
import copy
import heapq
import itertools
import logging
import os
import random
import select
import time
import six
import kafka.common as Errors # TODO: make Errors a separate class
from .cluster import ClusterMetadata
from .conn import BrokerConnection, ConnectionStates, collect_hosts
from .future import Future
from .protocol.metadata import MetadataRequest
from .protocol.produce import ProduceRequest
from .version import __version__
if six.PY2:
ConnectionError = None
log = logging.getLogger('kafka.client')
class KafkaClient(object):
"""
A network client for asynchronous request/response network i/o.
This is an internal class used to implement the
user-facing producer and consumer clients.
This class is not thread-safe!
"""
DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'request_timeout_ms': 40000,
'reconnect_backoff_ms': 50,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': 32768,
'send_buffer_bytes': 131072,
'retry_backoff_ms': 100,
'metadata_max_age_ms': 300000,
}
def __init__(self, **configs):
"""Initialize an asynchronous kafka client
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: 131072
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: 32768
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
"""
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self.cluster = ClusterMetadata(**self.config)
self._topics = set() # empty set will fetch all topic metadata
self._metadata_refresh_in_progress = False
self._conns = {}
self._connecting = set()
self._delayed_tasks = DelayedTaskQueue()
self._last_bootstrap = 0
self._bootstrap_fails = 0
self._bootstrap(collect_hosts(self.config['bootstrap_servers']))
self._wake_r, self._wake_w = os.pipe()
def _bootstrap(self, hosts):
# Exponential backoff if bootstrap fails
backoff_ms = self.config['reconnect_backoff_ms'] * 2 ** self._bootstrap_fails
next_at = self._last_bootstrap + backoff_ms / 1000.0
now = time.time()
if next_at > now:
log.debug("Sleeping %0.4f before bootstrapping again", next_at - now)
time.sleep(next_at - now)
self._last_bootstrap = time.time()
metadata_request = MetadataRequest([])
for host, port in hosts:
log.debug("Attempting to bootstrap via node at %s:%s", host, port)
bootstrap = BrokerConnection(host, port, **self.config)
bootstrap.connect()
while bootstrap.state is ConnectionStates.CONNECTING:
bootstrap.connect()
if bootstrap.state is not ConnectionStates.CONNECTED:
bootstrap.close()
continue
future = bootstrap.send(metadata_request)
while not future.is_done:
bootstrap.recv()
if future.failed():
bootstrap.close()
continue
self.cluster.update_metadata(future.value)
# A cluster with no topics can return no broker metadata
# in that case, we should keep the bootstrap connection
if not len(self.cluster.brokers()):
self._conns['bootstrap'] = bootstrap
self._bootstrap_fails = 0
break
# No bootstrap found...
else:
log.error('Unable to bootstrap from %s', hosts)
# Max exponential backoff is 2^12, x4000 (50ms -> 200s)
self._bootstrap_fails = min(self._bootstrap_fails + 1, 12)
def _can_connect(self, node_id):
if node_id not in self._conns:
if self.cluster.broker_metadata(node_id):
return True
return False
conn = self._conns[node_id]
return conn.state is ConnectionStates.DISCONNECTED and not conn.blacked_out()
def _initiate_connect(self, node_id):
"""Initiate a connection to the given node (must be in metadata)"""
if node_id not in self._conns:
broker = self.cluster.broker_metadata(node_id)
assert broker, 'Broker id %s not in current metadata' % node_id
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
self._conns[node_id] = BrokerConnection(broker.host, broker.port,
**self.config)
return self._finish_connect(node_id)
def _finish_connect(self, node_id):
assert node_id in self._conns, '%s is not in current conns' % node_id
state = self._conns[node_id].connect()
if state is ConnectionStates.CONNECTING:
self._connecting.add(node_id)
elif node_id in self._connecting:
log.debug("Node %s connection state is %s", node_id, state)
self._connecting.remove(node_id)
if state is ConnectionStates.DISCONNECTED:
log.warning("Node %s connect failed -- refreshing metadata", node_id)
self.cluster.request_update()
return state
def ready(self, node_id):
"""Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True if we are ready to send to the given node
"""
if self.is_ready(node_id):
return True
if self._can_connect(node_id):
# if we are interested in sending to a node
# and we don't have a connection to it, initiate one
self._initiate_connect(node_id)
if node_id in self._connecting:
self._finish_connect(node_id)
return self.is_ready(node_id)
def close(self, node_id=None):
"""Closes the connection to a particular node (if there is one).
Arguments:
node_id (int): the id of the node to close
"""
if node_id is None:
for conn in self._conns.values():
conn.close()
elif node_id in self._conns:
self._conns[node_id].close()
else:
log.warning("Node %s not found in current connection list; skipping", node_id)
return
def is_disconnected(self, node_id):
"""Check whether the node connection has been disconnected failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
call, but there are cases where transient failures need to be caught
and re-acted upon.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True iff the node exists and is disconnected
"""
if node_id not in self._conns:
return False
return self._conns[node_id].state is ConnectionStates.DISCONNECTED
def connection_delay(self, node_id):
"""
Returns the number of milliseconds to wait, based on the connection
state, before attempting to send data. When disconnected, this respects
the reconnect backoff time. When connecting or connected, this handles
slow/stalled connections.
Arguments:
node_id (int): the id of the node to check
Returns:
int: the number of milliseconds to wait
"""
if node_id not in self._conns:
return 0
conn = self._conns[node_id]
time_waited_ms = (time.time() - (conn.last_attempt or 0)) * 1000  # elapsed seconds -> ms
if conn.state is ConnectionStates.DISCONNECTED:
return max(self.config['reconnect_backoff_ms'] - time_waited_ms, 0)
else:
return 999999999
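# Worked example (assuming the default reconnect_backoff_ms of 50): a node that
# disconnected 20 ms ago yields a delay of 30 ms; a connecting or connected node
# returns the large sentinel, meaning no reconnect attempt is needed.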
def is_ready(self, node_id):
"""Check whether a node is ready to send more requests.
In addition to connection-level checks, this method also is used to
block additional requests from being sent during a metadata refresh.
Arguments:
node_id (int): id of the node to check
Returns:
bool: True if the node is ready and metadata is not refreshing
"""
# if we need to update our metadata now declare all requests unready to
# make metadata requests first priority
if not self._metadata_refresh_in_progress and not self.cluster.ttl() == 0:
if self._can_send_request(node_id):
return True
return False
def _can_send_request(self, node_id):
if node_id not in self._conns:
return False
conn = self._conns[node_id]
return conn.connected() and conn.can_send_more()
def send(self, node_id, request):
"""Send a request to a specific node.
Arguments:
node_id (int): destination node
request (Struct): request object (not-encoded)
Raises:
NodeNotReadyError: if node_id is not ready
Returns:
Future: resolves to Response struct
"""
if not self._can_send_request(node_id):
raise Errors.NodeNotReadyError("Attempt to send a request to node"
" which is not ready (node id %s)."
% node_id)
# Every request gets a response, except one special case:
expect_response = True
if isinstance(request, ProduceRequest) and request.required_acks == 0:
expect_response = False
return self._conns[node_id].send(request, expect_response=expect_response)
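# Note: the returned Future is resolved by poll(); a minimal pattern is
#   f = client.send(node_id, request)
#   client.poll(future=f)
# as used by check_version() below.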
def poll(self, timeout_ms=None, future=None, sleep=False):
"""Try to read and write to sockets.
This method will also attempt to complete node connections, refresh
stale metadata, and run previously-scheduled tasks.
Arguments:
timeout_ms (int, optional): maximum amount of time to wait (in ms)
for at least one response. Must be non-negative. The actual
timeout will be the minimum of timeout, request timeout and
metadata timeout. Default: request_timeout_ms
future (Future, optional): if provided, blocks until future.is_done
sleep (bool): if True and there is nothing to do (no connections
or requests in flight), will sleep for duration timeout before
returning empty results. Default: False.
Returns:
list: responses received (can be empty)
"""
if timeout_ms is None:
timeout_ms = self.config['request_timeout_ms']
responses = []
# Loop for futures, break after first loop if None
while True:
# Attempt to complete pending connections
for node_id in list(self._connecting):
self._finish_connect(node_id)
# Send a metadata request if needed
metadata_timeout_ms = self._maybe_refresh_metadata()
# Send scheduled tasks
for task, task_future in self._delayed_tasks.pop_ready():
try:
result = task()
except Exception as e:
log.error("Task %s failed: %s", task, e)
task_future.failure(e)
else:
task_future.success(result)
# If we got a future that is already done, don't block in _poll
if future and future.is_done:
timeout = 0
else:
timeout = min(
timeout_ms,
metadata_timeout_ms,
self._delayed_tasks.next_at() * 1000,
self.config['request_timeout_ms'])
timeout = max(0, timeout / 1000.0) # avoid negative timeouts
responses.extend(self._poll(timeout, sleep=sleep))
# If all we had was a timeout (future is None) - only do one poll
# If we do have a future, we keep looping until it is done
if not future or future.is_done:
break
return responses
def _poll(self, timeout, sleep=False):
# select on reads across all connected sockets, blocking up to timeout
sockets = dict([(conn._sock, conn)
for conn in six.itervalues(self._conns)
if conn.state is ConnectionStates.CONNECTED
and conn.in_flight_requests])
if not sockets:
# if sockets are connecting, we can wake when they are writeable
if self._connecting:
sockets = [self._conns[node]._sock for node in self._connecting]
select.select([self._wake_r], sockets, [], timeout)
elif timeout:
if sleep:
log.debug('Sleeping at %s for %s', time.time(), timeout)
select.select([self._wake_r], [], [], timeout)
log.debug('Woke up at %s', time.time())
else:
log.warning('_poll called with a non-zero timeout and'
' sleep=False -- but there was nothing to do.'
' This can cause high CPU usage during idle.')
self._clear_wake_fd()
return []
# Add a private pipe fd to allow external wakeups
fds = list(sockets.keys())
fds.append(self._wake_r)
ready, _, _ = select.select(fds, [], [], timeout)
responses = []
for sock in ready:
if sock == self._wake_r:
continue
conn = sockets[sock]
while conn.in_flight_requests:
response = conn.recv() # Note: conn.recv runs callbacks / errbacks
if not response:
break
responses.append(response)
self._clear_wake_fd()
return responses
def in_flight_request_count(self, node_id=None):
"""Get the number of in-flight requests for a node or all nodes.
Arguments:
node_id (int, optional): a specific node to check. If unspecified,
return the total for all nodes
Returns:
int: pending in-flight requests for the node, or all nodes if None
"""
if node_id is not None:
if node_id not in self._conns:
return 0
return len(self._conns[node_id].in_flight_requests)
else:
return sum([len(conn.in_flight_requests) for conn in self._conns.values()])
def least_loaded_node(self):
"""Choose the node with fewest outstanding requests, with fallbacks.
This method will prefer a node with an existing connection, but will
potentially choose a node for which we don't yet have a connection if
all existing connections are in use. This method will never choose a
node that was disconnected within the reconnect backoff period.
If all else fails, the method will attempt to bootstrap again using the
bootstrap_servers list.
Returns:
node_id or None if no suitable node was found
"""
nodes = list(self._conns.keys())
random.shuffle(nodes)
inflight = float('inf')
found = None
for node_id in nodes:
conn = self._conns[node_id]
curr_inflight = len(conn.in_flight_requests)
if curr_inflight == 0 and conn.connected():
# if we find an established connection with no in-flight requests we can stop right away
return node_id
elif not conn.blacked_out() and curr_inflight < inflight:
# otherwise if this is the best we have found so far, record that
inflight = curr_inflight
found = node_id
if found is not None:
return found
# if we found no connected node, return a disconnected one
log.debug("No connected nodes found. Trying disconnected nodes.")
for node_id in nodes:
if not self._conns[node_id].blacked_out():
return node_id
# if still no luck, look for a node not in self._conns yet
log.debug("No luck. Trying all broker metadata")
for broker in self.cluster.brokers():
if broker.nodeId not in self._conns:
return broker.nodeId
# Last option: try to bootstrap again
log.error('No nodes found in metadata -- retrying bootstrap')
self._bootstrap(collect_hosts(self.config['bootstrap_servers']))
return None
def set_topics(self, topics):
"""Set specific topics to track for metadata.
Arguments:
topics (list of str): topics to check for metadata
Returns:
Future: resolves after metadata request/response
"""
if set(topics).difference(self._topics):
future = self.cluster.request_update()
else:
future = Future().success(set(topics))
self._topics = set(topics)
return future
def add_topic(self, topic):
"""Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
Returns:
Future: resolves after metadata request/response
"""
if topic in self._topics:
return Future().success(set(self._topics))
self._topics.add(topic)
return self.cluster.request_update()
# Request a metadata update on disconnect and on timeout
def _maybe_refresh_metadata(self):
"""Send a metadata request if needed.
Returns:
int: milliseconds until next refresh
"""
ttl = self.cluster.ttl()
if ttl > 0:
return ttl
if self._metadata_refresh_in_progress:
return 9999999999
node_id = self.least_loaded_node()
if self._can_send_request(node_id):
request = MetadataRequest(list(self._topics))
log.debug("Sending metadata request %s to node %s", request, node_id)
future = self.send(node_id, request)
future.add_callback(self.cluster.update_metadata)
future.add_errback(self.cluster.failed_update)
self._metadata_refresh_in_progress = True
def refresh_done(val_or_error):
self._metadata_refresh_in_progress = False
future.add_callback(refresh_done)
future.add_errback(refresh_done)
elif self._can_connect(node_id):
log.debug("Initializing connection to node %s for metadata request", node_id)
self._initiate_connect(node_id)
return 0
def schedule(self, task, at):
"""Schedule a new task to be executed at the given time.
This is "best-effort" scheduling and should only be used for coarse
synchronization. A task cannot be scheduled multiple times
simultaneously; any previously scheduled instance of the same task
will be cancelled.
Arguments:
task (callable): task to be scheduled
at (float or int): epoch seconds when task should run
Returns:
Future: resolves to result of task call, or exception if raised
"""
return self._delayed_tasks.add(task, at)
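# Illustrative use (hypothetical task): run `my_task` roughly 5 seconds from now:
#   future = client.schedule(my_task, time.time() + 5)
# The returned Future resolves to the task's result (or its exception).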
def unschedule(self, task):
"""Unschedule a task.
This will remove all instances of the task from the task queue.
This is a no-op if the task is not scheduled.
Arguments:
task (callable): task to be unscheduled
"""
self._delayed_tasks.remove(task)
def check_version(self, node_id=None):
"""Attempt to guess the broker version"""
if node_id is None:
node_id = self.least_loaded_node()
def connect():
timeout = time.time() + 10
# brokers < 0.9 do not return any broker metadata if there are no topics
# so we're left with a single bootstrap connection
while not self.ready(node_id):
if time.time() >= timeout:
raise Errors.NodeNotReadyError(node_id)
time.sleep(0.025)
# kafka kills the connection when it doesn't recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
import socket
from .protocol.admin import ListGroupsRequest
from .protocol.commit import (
OffsetFetchRequest_v0, GroupCoordinatorRequest)
from .protocol.metadata import MetadataRequest
# Socket errors are logged as exceptions and can alarm users. Mute them
from logging import Filter
class ConnFilter(Filter):
def filter(self, record):
if record.funcName in ('recv', 'send'):
return False
return True
log_filter = ConnFilter()
test_cases = [
('0.9', ListGroupsRequest()),
('0.8.2', GroupCoordinatorRequest('kafka-python-default-group')),
('0.8.1', OffsetFetchRequest_v0('kafka-python-default-group', [])),
('0.8.0', MetadataRequest([])),
]
logging.getLogger('kafka.conn').addFilter(log_filter)
for version, request in test_cases:
connect()
f = self.send(node_id, request)
time.sleep(0.1) # HACK: sleeping to wait for socket to send bytes
metadata = self.send(node_id, MetadataRequest([]))
self.poll(future=f)
self.poll(future=metadata)
assert f.is_done
if f.succeeded():
log.info('Broker version identified as %s', version)
break
if six.PY2:
assert isinstance(f.exception.args[0], socket.error)
assert f.exception.args[0].errno in (32, 54, 104)
else:
assert isinstance(f.exception.args[0], ConnectionError)
log.info("Broker is not v%s -- it did not recognize %s",
version, request.__class__.__name__)
continue
else:
raise Errors.UnrecognizedBrokerVersion()
logging.getLogger('kafka.conn').removeFilter(log_filter)
return version
def wakeup(self):
os.write(self._wake_w, b'x')
def _clear_wake_fd(self):
while True:
fds, _, _ = select.select([self._wake_r], [], [], 0)
if not fds:
break
os.read(self._wake_r, 1)
class DelayedTaskQueue(object):
# see https://docs.python.org/2/library/heapq.html
def __init__(self):
self._tasks = [] # list of entries arranged in a heap
self._task_map = {} # mapping of tasks to entries
self._counter = itertools.count() # unique sequence count
def add(self, task, at):
"""Add a task to run at a later time.
Arguments:
task: can be anything, but generally a callable
at (float or int): epoch seconds to schedule task
Returns:
Future: a future that will be returned with the task when ready
"""
if task in self._task_map:
self.remove(task)
count = next(self._counter)
future = Future()
entry = [at, count, (task, future)]
self._task_map[task] = entry
heapq.heappush(self._tasks, entry)
return future
def remove(self, task):
"""Remove a previously scheduled task.
Raises:
KeyError: if task is not found
"""
entry = self._task_map.pop(task)
task, future = entry[-1]
future.failure(Errors.Cancelled)
entry[-1] = 'REMOVED'
def _drop_removed(self):
while self._tasks and self._tasks[0][-1] == 'REMOVED':
at, count, task = heapq.heappop(self._tasks)
def _pop_next(self):
self._drop_removed()
if not self._tasks:
raise KeyError('pop from an empty DelayedTaskQueue')
_, _, maybe_task = heapq.heappop(self._tasks)
if maybe_task == 'REMOVED':
raise ValueError('popped a removed task from queue - bug')
else:
task, future = maybe_task
del self._task_map[task]
return (task, future)
def next_at(self):
"""Number of seconds until next task is ready."""
self._drop_removed()
if not self._tasks:
return 9999999999
else:
return max(self._tasks[0][0] - time.time(), 0)
def pop_ready(self):
"""Pop and return a list of all ready (task, future) tuples"""
ready_tasks = []
while self._tasks and self._tasks[0][0] < time.time():
try:
task = self._pop_next()
except KeyError:
break
ready_tasks.append(task)
return ready_tasks
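# Minimal usage sketch (hypothetical values):
#   queue = DelayedTaskQueue()
#   queue.add(task_a, time.time() + 1)
#   queue.next_at()    # roughly 1.0 second until task_a is ready
#   queue.pop_ready()  # [] before the scheduled time, then [(task_a, future)]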
|
the-stack_106_13894
|
# pylint: disable=protected-access
from collections import OrderedDict
from testfixtures import compare
from service.ws_re.register.authors import Authors
from service.ws_re.register.register_types.public_domain import PublicDomainRegister
from service.ws_re.register.register_types.volume import VolumeRegister
from service.ws_re.register.test_base import BaseTestRegister, copy_tst_data
from service.ws_re.volumes import Volumes
class TestPublicDomainRegister(BaseTestRegister):
def setUp(self):
copy_tst_data("authors_pd_register", "authors")
copy_tst_data("I_1_alpha", "I_1")
copy_tst_data("III_1_alpha", "III_1")
self.authors = Authors()
self.volumes = Volumes()
self.registers = OrderedDict()
self.registers["I,1"] = VolumeRegister(self.volumes["I,1"], self.authors)
self.registers["III,1"] = VolumeRegister(self.volumes["III,1"], self.authors)
def test_pd_authors(self):
pd_2021_register = PublicDomainRegister(2021, self.authors, self.registers)
compare(2, len(pd_2021_register._get_pd_authors()))
def test_init(self):
pd_2021_register = PublicDomainRegister(2021, self.authors, self.registers)
compare(6, len(pd_2021_register))
def test_make_table(self):
pd_2021_register = PublicDomainRegister(2021, self.authors, self.registers)
expected_table = """{|class="wikitable sortable"
!Artikel
!Band
!Status
!Wikilinks
!Seite
!Autor
!Sterbejahr
|-
|data-sort-value="aal"|[[RE:Aal|'''{{Anker2|Aal}}''']]
||I,1
|style="background:#AA0000"|UNK
||
|[[Special:Filepath/Pauly-Wissowa_I,1,_0001.jpg|1]]-4
|William Abbott
|style="background:#CBCBCB"|
|-
|data-sort-value="aba 001"|[[RE:Aba 1|'''{{Anker2|Aba 1}}''']]
||I,1
|style="background:#AA0000"|UNK
||
|[[Special:Filepath/Pauly-Wissowa_I,1,_0003.jpg|4]]
|Herman Abel
|style="background:#B9FFC5"|1950
|-
|data-sort-value="aba 002"|[[RE:Aba 2|'''{{Anker2|Aba 2}}''']]
||I,1
|style="background:#556B2F"|KOR
||
|[[Special:Filepath/Pauly-Wissowa_I,1,_0003.jpg|4]]
|Herman Abel
|style="background:#B9FFC5"|1950
|-
|data-sort-value="adam"|[[RE:Adam|'''{{Anker2|Adam}}''']]
||III,1
|style="background:#AA0000"|UNK
||
|[[Special:Filepath/Pauly-Wissowa_III,1,_0001.jpg|1]]-4
|William Abbott
|style="background:#CBCBCB"|
|-
|rowspan=2 data-sort-value="beta"|[[RE:Beta|'''{{Anker2|Beta}}''']]
|rowspan=2 |I,1
|rowspan=2 style="background:#669966"|FER
|rowspan=2 |
|[[Special:Filepath/Pauly-Wissowa_I,1,_0003.jpg|4]]
|Abert
|style="background:#B9FFC5"|1927
|-
|[[Special:Filepath/Pauly-Wissowa_I,1,_0003.jpg|4]]-5
|Herman Abel
|style="background:#B9FFC5"|1950
|-
|data-sort-value="charlie"|[[RE:Charlie|'''{{Anker2|Charlie}}''']]
||III,1
|style="background:#669966"|FER
||
|[[Special:Filepath/Pauly-Wissowa_III,1,_0003.jpg|4]]
|Herman Abel
|style="background:#B9FFC5"|1950
|}
Zahl der Artikel: 6, """
compare(expected_table, pd_2021_register.get_register_str())
|
the-stack_106_13895
|
class Queue:
"""
A queue implemented with two stacks (head and tail). The head
stack keeps elements in queue order, so its top element is
ready to be dequeued or printed.
When necessary, the elements of the tail stack are moved onto
the head stack. The normalize_head method checks whether that
transfer from tail to head is needed.
"""
head = []
tail = []
def normalize_head(self):
if not self.head:
while self.tail:
self.head.append(self.tail.pop())
def enqueue(self, element):
self.tail.append(element)
def dequeue(self):
self.normalize_head()
self.head.pop()
def print_top(self):
self.normalize_head()
print(self.head[-1])
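# Worked example of the query protocol handled below (hypothetical input):
#   "1 42" enqueues 42, "3" prints the front element (42), "2" dequeues it.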
if __name__ == '__main__':
queue = Queue()
total_queries = int(input())
for _ in range(total_queries):
query_element = input().split()
query = int(query_element[0])
if query == 1:
element = int(query_element[1])
queue.enqueue(element)
if query == 2:
queue.dequeue()
if query == 3:
queue.print_top()
|
the-stack_106_13896
|
#!/usr/bin/env python3
import argparse
import os
import platform
cflags = "-g -Wall -Wextra -Wpedantic -Werror -fPIC "
cflags += "-Wno-gnu-zero-variadic-macro-arguments "
cflags += "-Wno-unused-parameter "
cflags += "-std=c11 -fcolor-diagnostics "
plat = platform.system()
if plat == 'Darwin':
cflags += '-DPLATFORM_DARWIN '
ninja_vars = {
"builddir" : "build",
"cc" : "clang",
"cflags" : cflags,
"ldflags" : "-L$builddir",
}
ninjafile_base = """
rule cc
command = $cc -MMD -MT $out -MF $out.d $cflags -c $in -o $out
description = CC $out
depfile = $out.d
deps = gcc
rule link
command = $cc $ldflags -o $out $in
description = LINK $out
rule shlib
command = $cc $ldflags -shared -o $out $in
description = SHLIB $out
"""
def get_san_flags(desc):
if desc is None:
return ""
flags = ""
sans = desc.split(',')
for san in sans:
flags += " -fsanitize=%s" % san
if san == "undefined":
flags += " -fno-sanitize-recover=undefined"
return flags
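# Example: get_san_flags("address,undefined") returns
# " -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=undefined"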
def strip_src_ext(src_file):
parts = src_file.split(".")
if len(parts) != 2 or parts[1] not in ["c","m"]:
return None
return parts[0], parts[1]
class BuildEnv:
def __init__(self, vars):
self.vars = vars
self.progs = []
self.objs = []
self.obj_ext_map = {}
self.shared_libs = []
def IsWindows(self):
return os.name == 'nt'
def IsDarwin(self):
return platform.system() == 'Darwin'
def src_helper(self, src):
objects = []
for f in src:
obj_name, ext = strip_src_ext(f)
objects.append(obj_name)
if obj_name not in self.objs:
self.objs.append(obj_name)
self.obj_ext_map[obj_name] = ext
return objects
def Program(self, name, src, **kwargs):
objects = self.src_helper(src)
self.progs.append((name, objects, kwargs))
def SharedLibrary(self, name, src):
objects = self.src_helper(src)
self.shared_libs.append((name, objects))
def Test(self, name, src):
return self.Program("test/%s" % name, src + ["test.c"])
def write_ninja(self, fp):
fp.write("# auto-generated by configure.py\n")
for k,v in self.vars.items():
fp.write("%s = %s\n" % (k,v))
fp.write(ninjafile_base)
fp.write("# objects\n")
for obj in self.objs:
ext = self.obj_ext_map[obj]
fp.write("build $builddir/%s.o: cc %s.%s\n" % (obj, obj, ext))
fp.write("\n# executables\n")
for (name, objs, props) in self.progs:
obj_line = " ".join(map(lambda x: "$builddir/%s.o" % x, objs))
fw_part = ""
if "frameworks" in props:
fws = props["frameworks"]
fw_part += "\n ldflags = $ldflags"
fw_part += "".join(map(lambda x: " -framework %s" % x, fws))
ext = ".exe" if self.IsWindows() else ""
fp.write("build $builddir/%s%s: link %s %s\n" % (name, ext, obj_line, fw_part))
fp.write("\n# shared libraries\n")
for (name, objs) in self.shared_libs:
obj_line = " ".join(map(lambda x: "$builddir/%s.o" % x, objs))
fp.write("build $builddir/%s: shlib %s\n" % (name, obj_line))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sanitizers', '--san', dest='sanitizers', default=None)
parser.add_argument('--config', default='debug')
args = parser.parse_args()
san_flags = get_san_flags(args.sanitizers)
ninja_vars["cflags"] += san_flags
ninja_vars["ldflags"] += san_flags
if args.config == "release":
ninja_vars["cflags"] += ' -O3'
else:
assert args.config == "debug", \
"Invalid config %s" % args.config
env = BuildEnv(ninja_vars)
env.Test('test_basic_lin_alloc',
['test_basic_lin_alloc.c', 'basic_lin_alloc.c'])
env.Test('test_better_lin_alloc',
['test_better_lin_alloc.c', 'better_lin_alloc.c', 'safe_printf.c'])
env.SharedLibrary('cheesy_malloc.so',
['cheesy_malloc.c', 'better_lin_alloc.c', 'safe_printf.c'])
if env.IsDarwin():
env.Program('objc_test', ['objc_test.m'], frameworks=['Foundation'])
with open("build.ninja", "w") as f:
env.write_ninja(f)
|
the-stack_106_13900
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks the result of a test experiment run. Note that this is not a
standalone unit test module, but used as part of our end-to-end integration
test."""
import os
import pytest
import redis
import rq
from common import config_utils, yaml_utils
from experiment.build import docker_images
@pytest.fixture(scope='class')
def experiment_config():
"""Returns the default configuration for end-to-end testing."""
return config_utils.validate_and_expand(
yaml_utils.read('fuzzbench/test_e2e/end-to-end-test-config.yaml'))
@pytest.fixture(scope='class')
def redis_connection():
"""Returns the default redis server connection."""
return redis.Redis(host='queue-server')
# pylint: disable=no-self-use
@pytest.mark.skipif('E2E_INTEGRATION_TEST' not in os.environ,
reason='Not running end-to-end test.')
@pytest.mark.usefixtures('redis_connection', 'experiment_config')
class TestEndToEndRunResults:
"""Checks the result of a test experiment run."""
def test_jobs_dependency(self, experiment_config, redis_connection): # pylint: disable=redefined-outer-name
"""Tests that jobs dependency preserves during working."""
all_images = docker_images.get_images_to_build(
experiment_config['fuzzers'], experiment_config['benchmarks'])
jobs = {
name: rq.job.Job.fetch(name, connection=redis_connection)
for name in all_images
}
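# Every image's build job must start only after all of its dependencies have ended.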
for name, image in all_images.items():
if 'depends_on' in image:
for dep in image['depends_on']:
assert jobs[dep].ended_at <= jobs[name].started_at
def test_all_jobs_finished_successfully(
self,
experiment_config, # pylint: disable=redefined-outer-name
redis_connection): # pylint: disable=redefined-outer-name
"""Tests all jobs finished successully."""
all_images = docker_images.get_images_to_build(
experiment_config['fuzzers'], experiment_config['benchmarks'])
jobs = rq.job.Job.fetch_many(all_images.keys(),
connection=redis_connection)
for job in jobs:
assert job.get_status() == 'finished'
def test_measurement_jobs_were_started_before_trial_jobs_finished(self):
"""Fake test to be implemented later."""
assert True
def test_db_contains_experiment_results(self):
"""Fake test to be implemented later."""
assert True
def test_experiment_report_is_generated(self):
"""Fake test to be implemented later."""
assert True
|
the-stack_106_13902
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
This module provides the abstract BinaryPackage class, which should be
inherited when implementing additional binary package supports.
qiBuild toolchains contain a set of packages, which can be extended.
This module provides utility functions to import binary packages used by some
distribution into any qiBuild toolchain.
All qiBuild packages should have the same layout.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import pprint
class BinaryPackageException(Exception):
""" Just a custom exception """
def __init__(self, message):
""" BinaryPackageException Init """
super(BinaryPackageException, self).__init__()
self._message = message
def __str__(self):
""" BinaryPackageException String Representation """
message = "Binary package exception:\n"
message += self._message
return message
class BinaryPackage(object):
"""
A binary package is the endpoint of a binary package file
provided by most Linux distributions.
It stores metadata read from the binary package itself.
"""
def __init__(self, package_path):
""" BinaryPackage Init """
self.path = package_path
self.metadata = None
self.name = None
def load(self):
"""
Set self.metadata and self.name
If the metadata has not been cached yet, then it is read/loaded and
cached in the instance.
The metadata is stored in a dictionary, which has the following layout::
metadata = {
name,
version,
revision,
arch,
arch_variant,
dependencies = {
buildtime,
runtime,
post-install,
all,
},
}
:return: the metadata dictionary
"""
if self.metadata:
return
self._load()
if "name" not in self.metadata:
raise Exception("Failed to load package. "
"Expecting at least a 'name' key "
"in package metadata")
self.name = self.metadata["name"]
def get_metadata(self):
""" Get the metadata from the package. """
# Cache the result inside the Package instance:
if self.metadata:
return self.metadata
self.load()
return self.metadata
def _load(self):
""" Each binary package should at least implement this. """
raise NotImplementedError()
def extract(self, dest_dir):
"""
Extract the binary package content, without the metadata.
:param dest_dir: the extraction directory
:return: the root directory of the extracted content
"""
raise NotImplementedError()
def __str__(self):
""" BinaryPackage String Representation """
res = "Binary package:\n"
res += ' Path: {0}\n'.format(self.path)
res += ' Metadata:\n'
res += pprint.pformat(self.metadata, indent=2)
return res
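# A concrete subclass (sketch, not part of this module) overrides _load() to fill
# self.metadata with the documented keys and extract() to unpack the archive, e.g.:
#   class DebBinaryPackage(BinaryPackage):
#       def _load(self):
#           self.metadata = {"name": "foo", "version": "1.0"}
#       def extract(self, dest_dir):
#           # unpack self.path into dest_dir and return the root directory
#           ...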
|
the-stack_106_13903
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabaz_score
from sklearn.metrics import pairwise_distances
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert_greater(score_precomputed, 0)
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert_greater(score_precomputed, 0)
assert_greater(score_euclidean, 0)
assert_almost_equal(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean, score_dense_with_sampling)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
r'Number of labels is %d\. Valid values are 2 '
r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
r'Number of labels is %d\. Valid values are 2 '
r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels + 10), silhouette_score(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
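# Expected value follows the Calinski-Harabaz definition
# (between_dispersion / (k - 1)) / (within_dispersion / (n - k)),
# read off here with dispersions 45 and 5, n = 40 samples and k = 4 labels.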
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
|
the-stack_106_13905
|
# -*- coding: utf-8 -*-
""" Sahana Eden Organisation Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3OrganisationModel",
"S3OrganisationNameModel",
"S3OrganisationBranchModel",
"S3OrganisationGroupModel",
"S3OrganisationGroupPersonModel",
"S3OrganisationGroupTeamModel",
"S3OrganisationLocationModel",
"S3OrganisationResourceModel",
"S3OrganisationSectorModel",
"S3OrganisationServiceModel",
"S3OrganisationSummaryModel",
"S3OrganisationTeamModel",
"S3OrganisationTypeTagModel",
"S3SiteModel",
"S3SiteDetailsModel",
"S3FacilityModel",
"org_facility_rheader",
"S3RoomModel",
"S3OfficeModel",
"S3OfficeSummaryModel",
"S3OfficeTypeTagModel",
"org_organisation_logo",
"org_organisation_address",
"org_parents",
"org_root_organisation",
"org_root_organisation_name",
"org_organisation_requires",
"org_region_options",
"org_rheader",
"org_site_staff_config",
"org_organisation_controller",
"org_office_controller",
"org_facility_controller",
"org_update_affiliations",
"org_OrganisationRepresent",
"org_SiteRepresent",
#"org_AssignMethod",
"org_customise_org_resource_fields",
"org_organisation_list_layout",
"org_resource_list_layout",
"org_update_root_organisation",
)
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
import os
from gluon import *
try:
from gluon.dal.objects import Row
except ImportError:
# old web2py
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3OrganisationModel(S3Model):
"""
Organisations
"""
names = ("org_organisation_type",
"org_organisation_type_id",
"org_region",
"org_organisation",
"org_organisation_id",
"org_organisation_organisation_type",
"org_organisation_user",
"org_organisation_represent",
)
def model(self):
T = current.T
db = current.db
gis = current.gis
messages = current.messages
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
NONE = messages["NONE"]
hierarchical_organisation_types = settings.get_org_organisation_types_hierarchical()
multiple_organisation_types = settings.get_org_organisation_types_multiple()
# ---------------------------------------------------------------------
# Organisation Types
#
tablename = "org_organisation_type"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
),
Field("parent", "reference org_organisation_type", # This form of hierarchy may not work on all Databases
label = T("SubType of"),
ondelete = "RESTRICT",
readable = hierarchical_organisation_types,
writable = hierarchical_organisation_types,
),
s3_comments(),
*s3_meta_fields())
type_represent = S3Represent(lookup=tablename, translate=True)
if hierarchical_organisation_types:
hierarchy = "parent"
# Can't be defined in-line as otherwise get a circular reference
table = db[tablename]
table.parent.represent = type_represent
table.parent.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_organisation_type.id",
type_represent,
# If limiting to just 1 level of parent
#filterby="parent",
#filter_opts=(None,),
orderby="org_organisation_type.name"))
organisation_type_widget = S3HierarchyWidget(lookup = "org_organisation_type",
represent = type_represent,
multiple = multiple_organisation_types,
#leafonly = True,
)
type_filter = S3HierarchyFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#multiple = multiple_organisation_types,
)
type_widget = "hierarchy"
else:
hierarchy = None
organisation_type_widget = None
type_filter = S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#multiple = multiple_organisation_types,
)
type_widget = "multiselect"
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Organization Type"),
title_display = T("Organization Type Details"),
title_list = T("Organization Types"),
title_update = T("Edit Organization Type"),
label_list_button = T("List Organization Types"),
label_delete_button = T("Delete Organization Type"),
msg_record_created = T("Organization Type added"),
msg_record_modified = T("Organization Type updated"),
msg_record_deleted = T("Organization Type deleted"),
msg_list_empty = T("No Organization Types currently registered"))
organisation_type_id = S3ReusableField("organisation_type_id",
"reference %s" % tablename,
label = T("Organization Type"),
ondelete = "SET NULL",
represent = type_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"org_organisation_type.id",
type_represent,
sort = True
)),
sortby = "name",
widget = organisation_type_widget,
comment = S3AddResourceLink(c="org",
f="organisation_type",
label=T("Create Organization Type"),
title=T("Organization Type"),
tooltip=T("If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.")
),
)
configure(tablename,
# Not needed since unique=True but would be
# if we removed to make these variable by Org
#deduplicate = self.organisation_type_duplicate,
hierarchy = hierarchy,
)
# Components
add_components(tablename,
# Tags
org_organisation_type_tag = {"name": "tag",
"joinby": "organisation_type_id",
},
)
if settings.get_org_regions():
hierarchical_regions = current.deployment_settings.get_org_regions_hierarchical()
# ---------------------------------------------------------------------
# Organisation Regions
#
tablename = "org_region"
define_table(tablename,
Field("name", length=128,
label = T("Name"),
),
Field("parent", "reference org_region", # This form of hierarchy may not work on all Databases
# Label hard-coded for IFRC currently
label = T("Zone"),
ondelete = "RESTRICT",
readable = hierarchical_regions,
writable = hierarchical_regions,
),
# Can add Path, Level, L0, L1 if-useful for performance, widgets, etc
s3_comments(),
*s3_meta_fields())
region_represent = S3Represent(lookup=tablename, translate=True)
if hierarchical_regions:
hierarchy = "parent"
# Can't be defined in-line as otherwise get a circular reference
table = db[tablename]
table.parent.represent = region_represent
table.parent.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_region.id",
region_represent,
# Limited to just 1 level of parent
# IFRC requirement
filterby="parent",
filter_opts=(None,),
orderby="org_region.name"))
else:
hierarchy = None
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Region"),
title_display = T("Region Details"),
title_list = T("Regions"),
title_update = T("Edit Region"),
label_list_button = T("List Regions"),
label_delete_button = T("Delete Region"),
msg_record_created = T("Region added"),
msg_record_modified = T("Region updated"),
msg_record_deleted = T("Region deleted"),
msg_list_empty = T("No Regions currently registered"))
region_id = S3ReusableField("region_id", "reference %s" % tablename,
label = T("Region"),
ondelete = "SET NULL",
represent = region_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_region.id",
region_represent,
sort=True,
# Only show the Regions, not the Zones
not_filterby="parent",
not_filter_opts=(None,)
)),
sortby = "name",
comment = S3AddResourceLink(c="org",
f="region",
label=T("Add Region"),
title=T("Region"),
tooltip=T("If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.")
),
)
configure(tablename,
deduplicate = self.org_region_duplicate,
hierarchy = hierarchy,
)
else:
region_represent = None
region_id = S3ReusableField("region_id", "integer",
readable = False,
writable = False)
# ---------------------------------------------------------------------
# Organisations
# http://xmlns.com/foaf/0.1/Organisation
#
tablename = "org_organisation"
define_table(tablename,
self.super_link("pe_id", "pr_pentity"),
Field("root_organisation", "reference org_organisation",
readable = False,
writable = False,
represent = S3Represent(lookup="org_organisation"),
),
Field("name", notnull=True, unique=True, # @ToDo: Remove unique=True (ARC have 3x Wayne County chapters)
length=128, # Mayon Compatibility
label = T("Name"),
),
# http://hxl.humanitarianresponse.info/#abbreviation
Field("acronym", length=16,
label = T("Acronym"),
represent = lambda val: val or "",
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Acronym"),
T("Acronym of the organization's name, eg. IFRC.")))
),
#Field("registration", label = T("Registration")), # Registration Number
region_id(),
Field("country", length=2,
label = T("Home Country"),
represent = self.gis_country_code_represent,
requires = IS_EMPTY_OR(IS_IN_SET_LAZY(
lambda: gis.get_countries(key_type="code"),
zero=messages.SELECT_LOCATION)),
),
# @ToDo: Deprecate with Contact component
Field("phone",
label = T("Phone #"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(s3_phone_requires),
#readable = False,
#writable = False,
),
# http://hxl.humanitarianresponse.info/#organisationHomepage
Field("website",
label = T("Website"),
represent = s3_url_represent,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("year", "integer",
label = T("Year"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(
IS_INT_IN_RANGE(1850, 2100)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Year"),
T("Year that the organization was founded"))),
),
Field("logo", "upload",
label = T("Logo"),
represent = self.doc_image_represent,
requires = [IS_EMPTY_OR(IS_IMAGE(maxsize=(400, 400),
error_message=T("Upload an image file (png or jpeg), max. 400x400 pixels!"))),
IS_EMPTY_OR(IS_UPLOAD_FILENAME())],
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Logo"),
T("Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400"))),
uploadfolder = os.path.join(
current.request.folder, "uploads"),
),
s3_comments(),
#document_id(), # Better to have multiple Documents on a Tab
#Field("privacy", "integer", default=0),
#Field("archived", "boolean", default=False),
*s3_meta_fields())
form_fields = [ "name",
"acronym",
S3SQLInlineLink(
"organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = multiple_organisation_types,
widget = type_widget,
),
"region_id",
"country",
"phone",
"website",
"year",
"logo",
"comments",
]
if settings.get_org_summary():
# Include Summary fields in form
position = form_fields.index("year")
form_fields.insert(position + 1, "summary.national_staff")
form_fields.insert(position + 2, "summary.international_staff")
crud_form = S3SQLCustomForm(*form_fields
)
# CRUD strings
ADD_ORGANIZATION = T("Create Organization")
crud_strings[tablename] = Storage(
label_create = ADD_ORGANIZATION,
title_display = T("Organization Details"),
title_list = T("Organizations"),
title_update = T("Edit Organization"),
title_upload = T("Import Organizations"),
label_list_button = T("List Organizations"),
label_delete_button = T("Delete Organization"),
msg_record_created = T("Organization added"),
msg_record_modified = T("Organization updated"),
msg_record_deleted = T("Organization deleted"),
msg_list_empty = T("No Organizations currently registered"))
# Default widget
if settings.get_org_autocomplete():
help = messages.AUTOCOMPLETE_HELP
default_widget = S3OrganisationAutocompleteWidget(default_from_profile=True)
else:
help = T("If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.")
default_widget = None
org_widgets = {"default": default_widget}
# Representation for foreign keys
org_organisation_represent = org_OrganisationRepresent(show_link=True)
# Fields for text filter
text_fields = ["name",
"acronym",
"comments",
]
if settings.get_L10n_translate_org_organisation():
text_fields.extend(("name.name_l10n",
"name.acronym_l10n"
))
if settings.get_org_branches():
# Additional text filter fields for branches
text_fields.extend(("parent.name",
"parent.acronym",
))
text_comment = T("You can search by name, acronym, comments or parent name or acronym.")
# Hierarchy configuration and widget
configure(tablename,
# link table alias (organisation_branch) is ambiguous here
# => need to specify the full join
hierarchy="branch_id:org_organisation_branch.organisation_id")
org_widgets["hierarchy"] = S3HierarchyWidget(lookup="org_organisation",
represent=org_organisation_represent,
multiple=False,
leafonly=False,
)
else:
text_comment = T("You can search by name, acronym or comments")
organisation_comment = S3AddResourceLink(c="org", f="organisation",
label=ADD_ORGANIZATION,
title=ADD_ORGANIZATION,
tooltip=help)
from_organisation_comment = S3AddResourceLink(c="org",
f="organisation",
vars=dict(child="from_organisation_id"),
label=ADD_ORGANIZATION,
title=ADD_ORGANIZATION,
tooltip=help)
# Reusable field
auth = current.auth
organisation_id = S3ReusableField("organisation_id", "reference %s" % tablename,
comment = organisation_comment,
default = auth.user.organisation_id if auth.is_logged_in() \
else None,
label = messages.ORGANISATION,
ondelete = "RESTRICT",
represent = org_organisation_represent,
requires = org_organisation_requires(),
sortby = "name",
widgets = org_widgets,
)
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
comment = text_comment,
#_class = "filter-search",
),
# NB Order is important here - gets popped in asset & inv controllers & IFRC template
type_filter,
# NB Order is important here - gets popped in asset & inv controllers & IFRC template
S3OptionsFilter("sector_organisation.sector_id",
options = lambda: \
get_s3_filter_opts("org_sector",
location_filter=True,
none=True,
translate=True),
),
S3OptionsFilter("country",
#label = T("Home Country"),
),
]
location_context = settings.get_org_organisation_location_context()
utablename = auth.settings.table_user_name
configure(tablename,
context = {"location": location_context,
},
crud_form = crud_form,
deduplicate = self.organisation_duplicate,
filter_widgets = filter_widgets,
list_fields = ["id",
"name",
"acronym",
"organisation_organisation_type.organisation_type_id",
"website"
],
list_layout = org_organisation_list_layout,
list_orderby = "org_organisation.name",
onaccept = self.org_organisation_onaccept,
ondelete = self.org_organisation_ondelete,
referenced_by = [(utablename, "organisation_id")],
super_entity = "pr_pentity",
)
# Custom Method for S3OrganisationAutocompleteWidget
self.set_method("org", "organisation",
method = "search_ac",
action = self.org_search_ac)
# Components
add_components(tablename,
# Documents
doc_document = "organisation_id",
doc_image = "organisation_id",
# Groups
org_group = {"link": "org_group_membership",
"joinby": "organisation_id",
"key": "group_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
org_group_membership = "organisation_id",
# Names
org_organisation_name = {"name": "name",
"joinby": "organisation_id",
},
# Sites
org_site = "organisation_id",
# Facilities
org_facility = "organisation_id",
# Offices
org_office = "organisation_id",
# Warehouses
inv_warehouse = "organisation_id",
# Staff/Volunteers
hrm_human_resource = "organisation_id",
# Members
member_membership = "organisation_id",
# Evacuees
evr_case = "organisation_id",
# Locations served
gis_location = {"link": "org_organisation_location",
"joinby": "organisation_id",
"key": "location_id",
"actuate": "hide",
},
# Format for filter_widget
org_organisation_location = "organisation_id",
# Types
org_organisation_type = {"link": "org_organisation_organisation_type",
"joinby": "organisation_id",
"key": "organisation_type_id",
"multiple": multiple_organisation_types,
"actuate": "hide",
},
# Format for filter_widget
org_organisation_organisation_type = "organisation_id",
# Catalogs
supply_catalog = "organisation_id",
# Resources
org_resource = "organisation_id",
# Sectors
org_sector = {"link": "org_sector_organisation",
"joinby": "organisation_id",
"key": "sector_id",
"actuate": "hide",
},
# Format for filter_widget
org_sector_organisation = "organisation_id",
# Services
org_service = {"link": "org_service_organisation",
"joinby": "organisation_id",
"key": "service_id",
"actuate": "hide",
},
# Format for filter_widget
org_service_organisation = "organisation_id",
# Assets
asset_asset = "organisation_id",
# Needs
req_organisation_needs = {"name": "needs",
"joinby": "organisation_id",
"multiple": False,
},
# Requests
#req_req = "donated_by_id",
# Enable this to allow migration of users between instances
#auth_user = "organisation_id",
# Related Organisations
org_organisation = (# Branches
{"name": "branch",
"link": "org_organisation_branch",
"joinby": "organisation_id",
"key": "branch_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": True,
},
# Parent (for imports)
{"name": "parent",
"link": "org_organisation_branch",
"joinby": "branch_id",
"key": "organisation_id",
"actuate": "embed",
"autocomplete": "name",
"autodelete": False,
},
),
)
# Projects
if settings.get_project_multiple_organisations():
# Use link table
add_components(tablename,
project_project = {"link": "project_organisation",
"joinby": "organisation_id",
"key": "project_id",
# Embed widget doesn't currently
# support 2 fields of same name (8 hours)
#"actuate": "embed",
"actuate": "hide",
"autocomplete": "name",
"autodelete": False,
},
# Format for filter_widget
project_organisation = {"name": "project_organisation",
"joinby": "organisation_id",
},
)
else:
# Direct link
add_components(tablename,
project_project = "organisation_id",
)
# Organisation Summary data
if settings.get_org_summary():
add_components(tablename,
org_organisation_summary = {"name": "summary",
"joinby": "organisation_id",
"multiple": False,
},
)
# ---------------------------------------------------------------------
# Organisation <-> Organisation Type
#
tablename = "org_organisation_organisation_type"
define_table(tablename,
organisation_id(empty = False,
ondelete = "CASCADE",
),
organisation_type_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
configure(tablename,
xml_post_parse = self.org_organisation_organisation_type_xml_post_parse,
)
# ---------------------------------------------------------------------
# Organisation <-> User
#
utable = auth.settings.table_user
tablename = "org_organisation_user"
define_table(tablename,
Field("user_id", utable),
organisation_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(org_organisation_type_id = organisation_type_id,
org_organisation_id = organisation_id,
org_organisation_represent = org_organisation_represent,
org_region_represent = region_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def org_organisation_onaccept(form):
"""
If a logo was uploaded then create the extra versions.
Process injected fields
"""
newfilename = form.vars.logo_newfilename
if newfilename:
s3db = current.s3db
image = form.request_vars.logo
s3db.pr_image_resize(image.file,
newfilename,
image.filename,
(None, 60),
)
s3db.pr_image_modify(image.file,
newfilename,
image.filename,
(None, 60),
"bmp",
)
# Set default root_organisation ID
db = current.db
try:
record_id = form.vars.id
except AttributeError:
pass
else:
otable = db.org_organisation
query = (otable.id == record_id) & \
(otable.root_organisation == None)
db(query).update(root_organisation = otable.id)
# Process Injected Fields
if not current.deployment_settings.get_org_summary():
return
post_vars = current.request.post_vars
record_id = post_vars.id
if not record_id:
# Not a POST request (e.g. import), hence no injected fields either
return
table = current.s3db.org_organisation_summary
query = (table.organisation_id == record_id)
existing = db(query).select(table.id,
limitby=(0, 1)).first()
if "national_staff" in post_vars:
national_staff = post_vars.national_staff
else:
national_staff = None
if "international_staff" in post_vars:
international_staff = post_vars.international_staff
else:
international_staff = None
if existing:
db(query).update(national_staff=national_staff,
international_staff=international_staff
)
elif national_staff or international_staff:
table.insert(organisation_id=record_id,
national_staff=national_staff,
international_staff=international_staff
)
return
# -------------------------------------------------------------------------
@staticmethod
def org_organisation_ondelete(row):
"""
If an Org is deleted then remove Logo
"""
db = current.db
table = db.org_organisation
deleted_row = db(table.id == row.id).select(table.logo,
limitby=(0, 1)
).first()
if deleted_row and deleted_row.logo:
current.s3db.pr_image_delete_all(deleted_row.logo)
# -------------------------------------------------------------------------
@staticmethod
def org_organisation_organisation_type_xml_post_parse(element, record):
"""
Check for defaults provided by project/organisation.xsl
"""
org_type_default = element.xpath('data[@field="_organisation_type_id"]')
if org_type_default:
org_type_default = org_type_default[0].text
db = current.db
table = db.org_organisation_type
row = None
# These default mappings can be overridden per-deployment
if org_type_default == "Donor":
row = db(table.name == "Bilateral").select(table.id,
cache=current.s3db.cache,
limitby=(0, 1)).first()
elif org_type_default == "Partner":
row = db(table.name == "NGO").select(table.id,
cache=current.s3db.cache,
limitby=(0, 1)).first()
elif org_type_default in ("Host National Society",
"Partner National Society"):
row = db(table.name == "Red Cross / Red Crescent").select(table.id,
cache=current.s3db.cache,
limitby=(0, 1)
).first()
if row:
# Note this sets only the default, so won't override existing or explicit values
record._organisation_type_id = row.id
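        # Illustrative sketch (assumed XSL output): project/organisation.xsl can
        # emit a default such as
        #
        #     <data field="_organisation_type_id">Donor</data>
        #
        # inside the organisation element; the lookup above then maps "Donor"
        # to the "Bilateral" organisation type unless a type is set explicitly.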
# -------------------------------------------------------------------------
@staticmethod
def organisation_type_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name", None)
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def org_region_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name", None)
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -----------------------------------------------------------------------------
@staticmethod
def organisation_duplicate(item):
"""
Import item deduplication, match by name
NB: usually, this is only needed to catch cases where the
import item is misspelled (case mismatch), otherwise the
org name is a primary key and matches automatically.
However, if there's a spelling mistake, we would want to
retain the original spelling *because* the name is a
primary key.
@param item: the S3ImportItem instance
"""
name = item.data.get("name", None)
if name:
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
table.name,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
# Retain the correct spelling of the name
item.data.name = duplicate.name
item.method = item.METHOD.UPDATE
# -----------------------------------------------------------------------------
@staticmethod
def org_search_ac(r, **attr):
"""
JSON search method for S3OrganisationAutocompleteWidget
- searches name & acronym for both this organisation & the parent
of branches
@param r: the S3Request
@param attr: request attributes
"""
_vars = current.request.get_vars
# JQueryUI Autocomplete uses "term"
# old JQuery Autocomplete uses "q"
# what uses "value"?
        value = _vars.term or _vars.value or _vars.q or None
        if not value:
            output = current.xml.json_message(False, 400,
                                              "Missing option! Require value")
            raise HTTP(400, body=output)
        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        value = value.lower().strip()
response = current.response
resource = r.resource
table = resource.table
settings = current.deployment_settings
use_branches = settings.get_org_branches()
search_l10n = settings.get_L10n_translate_org_organisation()
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
query = (FS("organisation.name").lower().like(value + "%")) | \
(FS("organisation.acronym").lower().like(value + "%"))
if use_branches:
query |= (FS("parent.name").lower().like(value + "%")) | \
(FS("parent.acronym").lower().like(value + "%"))
if search_l10n:
query |= (FS("name.name_l10n").lower().like(value + "%")) | \
(FS("name.acronym_l10n").lower().like(value + "%"))
resource.add_filter(query)
MAX_SEARCH_RESULTS = settings.get_search_max_results()
limit = int(_vars.limit or MAX_SEARCH_RESULTS)
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = json.dumps([
dict(label=str(current.T("There are more than %(max)s results, please input more characters.") % dict(max=MAX_SEARCH_RESULTS)))
], separators=SEPARATORS)
else:
field = table.name
field2 = table.acronym
# Fields to return
fields = ["id",
"name",
"acronym",
]
if use_branches:
fields.append("parent.name")
if search_l10n:
fields += ["name.name_l10n",
"name.acronym_l10n",
]
rows = resource.select(fields,
start=0,
limit=limit,
orderby=field,
as_rows=True)
output = []
append = output.append
for row in rows:
acronym = ""
if use_branches or search_l10n:
_row = row[table]
else:
_row = row
if search_l10n:
name = row["org_organisation_name.name_l10n"] or _row.name
acronym = row["org_organisation_name.acronym_l10n"] or _row.acronym
else:
name = _row.name
acronym = _row.acronym
record = dict(id = _row.id,
name = name,
)
if acronym:
record["acronym"] = acronym
if "org_parent_organisation" in row:
parent = object.__getattribute__(row, "org_parent_organisation")
if parent.name is not None:
record["parent"] = parent.name
# Determine if input is org hit or acronym hit
value_len = len(value)
orgNameHit = name[:value_len].lower() == value
if orgNameHit:
nextString = name[value_len:]
if nextString != "":
record["matchString"] = name[:value_len]
record["nextString"] = nextString
else:
nextString = acronym[value_len:]
if nextString != "":
record["matchString"] = acronym[:value_len]
record["nextString"] = nextString
record["match"] = "acronym"
append(record)
output = json.dumps(output, separators=SEPARATORS)
response.headers["Content-Type"] = "application/json"
return output
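        # Illustrative sketch (hypothetical data and URL): a request like
        # /org/organisation/search_ac.json?term=red could return
        #
        #     [{"id": 3,
        #       "name": "Red Cross / Red Crescent",
        #       "acronym": "RC",
        #       "matchString": "Red",
        #       "nextString": " Cross / Red Crescent"}]
        #
        # where matchString/nextString let the widget highlight the match.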
# =============================================================================
class S3OrganisationNameModel(S3Model):
"""
        Organisation Names model
- local names/acronyms for Organisations
"""
names = ("org_organisation_name",
)
def model(self):
T = current.T
l10n_languages = current.deployment_settings.get_L10n_languages()
# ---------------------------------------------------------------------
# Local Names
#
tablename = "org_organisation_name"
self.define_table(tablename,
self.org_organisation_id(empty = False,
ondelete = "CASCADE",
),
Field("language",
label = T("Language"),
represent = lambda opt: l10n_languages.get(opt,
current.messages.UNKNOWN_OPT),
requires = IS_ISO639_2_LANGUAGE_CODE(),
),
Field("name_l10n",
label = T("Local Name"),
),
Field("acronym_l10n",
label = T("Local Acronym"),
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
deduplicate = self.org_organisation_name_deduplicate,
)
# Pass names back to global scope (s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def org_organisation_name_deduplicate(item):
"""
If the record is a duplicate then it will set the item method to update
"""
data = item.data
language = data.get("language", None)
org = data.get("organisation_id", None)
if not language or not org:
return
table = item.table
query = (table.language == language) & \
(table.organisation_id == org)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OrganisationBranchModel(S3Model):
"""
Organisation Branches
"""
names = ("org_organisation_branch",)
def model(self):
T = current.T
organisation_id = self.org_organisation_id
# ---------------------------------------------------------------------
# Organisation Branches
#
tablename = "org_organisation_branch"
self.define_table(tablename,
organisation_id(ondelete = "CASCADE"),
organisation_id("branch_id",
default = None,
label = T("Branch"),
ondelete = "CASCADE",
),
*s3_meta_fields())
# CRUD strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Branch Organization"),
title_display = T("Branch Organization Details"),
title_list = T("Branch Organizations"),
title_update = T("Edit Branch Organization"),
#title_upload = T("Import Branch Organizations"),
label_list_button = T("List Branch Organizations"),
label_delete_button = T("Delete Branch"),
msg_record_created = T("Branch Organization added"),
msg_record_modified = T("Branch Organization updated"),
msg_record_deleted = T("Branch Organization deleted"),
msg_list_empty = T("No Branch Organizations currently registered"))
self.configure(tablename,
deduplicate = self.org_branch_duplicate,
onaccept = self.org_branch_onaccept,
ondelete = self.org_branch_ondelete,
onvalidation = self.org_branch_onvalidation,
)
# -----------------------------------------------------------------------------
@staticmethod
def org_branch_duplicate(item):
"""
An Organisation can only be a branch of one Organisation
"""
branch_id = item.data.get("branch_id")
if branch_id:
table = item.table
query = (table.branch_id == branch_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def org_branch_onvalidation(form):
"""
Prevent an Organisation from being a Branch of itself
- this is for interactive forms, imports are caught in .xsl
"""
request_vars = form.request_vars
if request_vars and \
request_vars.branch_id and \
int(request_vars.branch_id) == int(request_vars.organisation_id):
error = current.T("Cannot make an Organization a branch of itself!")
form.errors["branch_id"] = error
current.response.error = error
# -------------------------------------------------------------------------
@staticmethod
def org_branch_onaccept(form):
"""
Remove any duplicate memberships and update affiliations
"""
_id = form.vars.id
db = current.db
s3db = current.s3db
# Fields a branch organisation inherits from its parent organisation
# (components added later)
inherit = ["region_id",
"country",
]
otable = s3db.org_organisation
ltable = db.org_organisation_branch
btable = db.org_organisation.with_alias("org_branch_organisation")
ifields = [otable[fn] for fn in inherit] + \
[btable[fn] for fn in inherit]
left = [otable.on(ltable.organisation_id == otable.id),
btable.on(ltable.branch_id == btable.id)]
record = db(ltable.id == _id).select(otable.root_organisation,
btable.root_organisation,
ltable.branch_id,
ltable.organisation_id,
ltable.deleted,
ltable.deleted_fk,
*ifields,
left=left,
limitby=(0, 1)).first()
if record:
organisation = record.org_organisation
branch = record.org_branch_organisation
link = record.org_organisation_branch
branch_id = link.branch_id
organisation_id = link.organisation_id
if branch_id and organisation_id and not link.deleted:
# Eliminate duplicate affiliations
query = (ltable.branch_id == branch_id) & \
(ltable.organisation_id == organisation_id) & \
(ltable.id != _id) & \
(ltable.deleted != True)
deleted_fk = {"branch_id": branch_id,
"organisation_id": organisation_id,
}
db(query).update(deleted=True,
branch_id=None,
organisation_id=None,
deleted_fk=json.dumps(deleted_fk))
# Inherit fields from parent organisation
update = dict((field, organisation[field])
for field in inherit
if not branch[field] and organisation[field])
if update:
db(otable.id == branch_id).update(**update)
record_ids = (organisation_id, branch_id)
# Inherit Org Types
ltable = db.org_organisation_organisation_type
rows = db(ltable.organisation_id.belongs(record_ids)) \
.select(ltable.organisation_id,
ltable.organisation_type_id,
)
org_types = set()
branch_types = set()
for row in rows:
if row.organisation_id == organisation_id:
org_types.add(row.organisation_type_id)
else:
branch_types.add(row.organisation_type_id)
for t in org_types - branch_types:
ltable.insert(organisation_id = branch_id,
organisation_type_id = t,
)
# Inherit Org Sectors
ltable = s3db.org_sector_organisation
rows = db(ltable.organisation_id.belongs(record_ids)) \
.select(ltable.organisation_id,
ltable.sector_id,
)
org_sectors = set()
branch_sectors = set()
for row in rows:
if row.organisation_id == organisation_id:
org_sectors.add(row.sector_id)
else:
branch_sectors.add(row.sector_id)
for s in org_sectors - branch_sectors:
ltable.insert(organisation_id = branch_id,
sector_id = s,
)
org_update_affiliations("org_organisation_branch", link)
# Update the root organisation
if link.deleted or \
branch.root_organisation is None or \
branch.root_organisation != organisation.root_organisation:
org_update_root_organisation(branch_id)
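        # Illustrative sketch (hypothetical IDs, not executed): inserting a
        # link such as
        #
        #     ltable = current.s3db.org_organisation_branch
        #     link_id = ltable.insert(organisation_id=3, branch_id=7)
        #
        # and running this onaccept would make organisation 7 a branch of
        # organisation 3, inherit region_id/country plus any missing org types
        # and sectors from 3, and re-root 7 under 3's root_organisation.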
# -------------------------------------------------------------------------
@staticmethod
def org_branch_ondelete(row):
"""
Update affiliations
"""
db = current.db
table = db.org_organisation_branch
record = db(table.id == row.id).select(table.branch_id,
table.deleted,
table.deleted_fk,
limitby=(0, 1)).first()
if record:
org_update_affiliations("org_organisation_branch", record)
# =============================================================================
class S3OrganisationGroupModel(S3Model):
"""
Organisation Group Model
- 'Coalitions' or 'Networks'
"""
names = ("org_group",
"org_group_membership",
"org_group_membership_status",
"org_group_id",
"org_group_represent",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Organization Groups
#
tablename = "org_group"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
Field("name", notnull=True, unique=True, length=128,
label = T("Name"),
),
Field("mission",
label = T("Mission"),
represent = lambda v: v or NONE,
# Enable as-required in Custom Forms
readable = False,
writable = False,
),
Field("website",
label = T("Website"),
represent = s3_url_represent,
requires = IS_EMPTY_OR(IS_URL()),
),
Field("meetings",
label = T("Meetings"),
represent = lambda v: v or NONE,
# Enable as-required in Custom Forms
readable = False,
writable = False,
),
self.gis_location_id(
widget = S3LocationSelector(#catalog_layers = True,
points = False,
polygons = True,
)
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
label = current.deployment_settings.get_org_groups()
if label == "Coalition":
crud_strings[tablename] = Storage(
label_create = T("Create Coalition"),
title_display = T("Coalition Details"),
title_list = T("Coalitions"),
title_update = T("Update Coalition"),
label_list_button = T("List Coalitions"),
label_delete_button = T("Remove Coalition"),
msg_record_created = T("Coalition added"),
msg_record_modified = T("Coalition updated"),
msg_record_deleted = T("Coalition removed"),
msg_list_empty = T("No Coalitions currently recorded"))
elif label == "Network":
crud_strings[tablename] = Storage(
label_create = T("Create Network"),
title_display = T("Network Details"),
title_list = T("Networks"),
title_update = T("Edit Network"),
label_list_button = T("List Networks"),
label_delete_button = T("Remove Network"),
msg_record_created = T("Network added"),
msg_record_modified = T("Network updated"),
msg_record_deleted = T("Network removed"),
msg_list_empty = T("No Networks currently recorded"))
else:
# Functionality is disabled but model is being loaded via load_all_models()
label = "Group"
configure(tablename,
list_fields = ["name",
"comments",
],
super_entity = ("doc_entity", "pr_pentity"),
)
group_represent = S3Represent(lookup=tablename)
group_id = S3ReusableField("group_id", "reference %s" % tablename,
label = T(label),
# Always links via Link Tables
ondelete = "CASCADE",
represent = group_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_group.id",
group_represent,
sort=True,
)),
sortby = "name",
)
# Components
self.add_components(tablename,
org_group_membership = {"name": "membership",
"joinby": "group_id",
},
org_organisation = {"joinby": "group_id",
"key": "organisation_id",
"link": "org_group_membership",
"actuate": "replace",
},
pr_group = {"name": "pr_group",
"joinby": "org_group_id",
"key": "group_id",
"link": "org_group_team",
"actuate": "replace",
},
)
# Custom Method to Assign Orgs
self.set_method("org", "group",
method = "assign",
action = org_AssignMethod(component="membership"))
# ---------------------------------------------------------------------
# Group membership status
#
tablename = "org_group_membership_status"
define_table(tablename,
Field("name", notnull=True, unique=True, length=128,
label = T("Name"),
),
s3_comments(),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Create Status"),
title_display = T("Status Details"),
title_list = T("Statuses"),
title_update = T("Edit Status"),
label_list_button = T("List Statuses"),
label_delete_button = T("Delete Status"),
msg_record_created = T("Status added"),
msg_record_modified = T("Status updated"),
msg_record_deleted = T("Status removed"),
msg_list_empty = T("No Statuses currently defined"))
represent = S3Represent(lookup=tablename)
status_id = S3ReusableField("status_id", "reference %s" % tablename,
label = T("Status"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_group_membership_status.id",
represent,
sort=True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Group membership
#
tablename = "org_group_membership"
define_table(tablename,
group_id(empty = False,
ondelete = "CASCADE",
),
self.org_organisation_id(empty = False,
ondelete = "CASCADE",
),
status_id(),
*s3_meta_fields()
)
configure(tablename,
onaccept = self.group_membership_onaccept,
ondelete = self.group_membership_onaccept,
)
# Pass names back to global scope (s3.*)
return dict(org_group_id = group_id,
org_group_represent = group_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def group_membership_onaccept(form):
"""
Remove any duplicate memberships and update affiliations
"""
if hasattr(form, "vars"):
_id = form.vars.id
elif isinstance(form, Row) and "id" in form:
_id = form.id
else:
return
db = current.db
mtable = db.org_group_membership
if _id:
record = db(mtable.id == _id).select(limitby=(0, 1)).first()
else:
return
if record:
organisation_id = record.organisation_id
group_id = record.group_id
if organisation_id and group_id and not record.deleted:
query = (mtable.organisation_id == organisation_id) & \
(mtable.group_id == group_id) & \
(mtable.id != record.id) & \
(mtable.deleted != True)
deleted_fk = {"organisation_id": organisation_id,
"group_id": group_id}
db(query).update(deleted = True,
organisation_id = None,
group_id = None,
deleted_fk = json.dumps(deleted_fk))
org_update_affiliations("org_group_membership", record)
# =============================================================================
class S3OrganisationGroupPersonModel(S3Model):
"""
Link table between Organisation Groups & Persons
"""
names = ("org_group_person_status",
"org_group_person",
)
def model(self):
T = current.T
db = current.db
define_table = self.define_table
crud_strings = current.response.s3.crud_strings
# ---------------------------------------------------------------------
# Person<=>Organisation Group membership status
#
tablename = "org_group_person_status"
define_table(tablename,
Field("name", notnull=True, unique=True, length=128,
label = T("Name"),
),
s3_comments(),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Create Status"),
title_display = T("Status Details"),
title_list = T("Statuses"),
title_update = T("Edit Status"),
label_list_button = T("List Statuses"),
label_delete_button = T("Delete Status"),
msg_record_created = T("Status added"),
msg_record_modified = T("Status updated"),
msg_record_deleted = T("Status removed"),
msg_list_empty = T("No Statuses currently defined"))
represent = S3Represent(lookup=tablename)
status_id = S3ReusableField("status_id", "reference %s" % tablename,
label = T("Status"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_group_person_status.id",
represent,
sort=True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Link table between Organisation Groups & Persons
#
tablename = "org_group_person"
define_table(tablename,
self.org_group_id("org_group_id",
empty = False,
ondelete = "CASCADE",
),
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
status_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3OrganisationGroupTeamModel(S3Model):
"""
Link table between Organisation Groups & Teams
"""
names = ("org_group_team",)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
# Link table between Organisation Groups & Teams
#
tablename = "org_group_team"
self.define_table(tablename,
self.org_group_id("org_group_id",
empty = False,
ondelete = "CASCADE",
),
self.pr_group_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.org_group_team_onaccept,
)
# Pass names back to global scope (s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def org_group_team_onaccept(form):
"""
Update affiliations
"""
if hasattr(form, "vars"):
_id = form.vars.id
elif isinstance(form, Row) and "id" in form:
_id = form.id
else:
return
if not _id:
return
db = current.db
table = db.org_group_team
record = db(table.id == _id).select(table.group_id,
table.org_group_id,
limitby=(0, 1)).first()
if record:
org_group = ("org_organisation", record.org_group_id)
pr_group = ("pr_group", record.group_id)
current.s3db.pr_add_affiliation(org_group, pr_group,
role="Groups",
role_type=1) # 1 = OU
# =============================================================================
class S3OrganisationLocationModel(S3Model):
"""
Organisation Location Model
"""
names = ("org_organisation_location",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Organizations <> Locations Link Table
#
tablename = "org_organisation_location"
self.define_table(tablename,
self.org_organisation_id(),
self.gis_location_id(
requires = IS_LOCATION(),
#represent = self.gis_LocationRepresent(sep=", "),
widget = S3LocationAutocompleteWidget()
),
*s3_meta_fields()
)
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("New Location"),
title_display = T("Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
title_upload = T("Import Location data"),
label_list_button = T("List Locations"),
msg_record_created = T("Location added to Organization"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location removed from Organization"),
msg_list_empty = T("No Locations found for this Organization"))
self.configure(tablename,
deduplicate = self.org_organisation_location_deduplicate,
)
# Pass names back to global scope (s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def org_organisation_location_deduplicate(item):
""" Import item de-duplication """
data = item.data
organisation_id = data.get("organisation_id")
location_id = data.get("location_id")
if organisation_id and location_id:
table = item.table
query = (table.organisation_id == organisation_id) & \
(table.location_id == location_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OrganisationResourceModel(S3Model):
"""
Organisation Resource Model
- depends on Stats module
"""
names = ("org_resource",
"org_resource_type",
)
def model(self):
#settings = current.deployment_settings
if not current.deployment_settings.has_module("stats"):
current.log.warning("Organisation Resource Model needs Stats module enabling")
return dict()
T = current.T
#auth = current.auth
crud_strings = current.response.s3.crud_strings
super_link = self.super_link
configure = self.configure
# ---------------------------------------------------------------------
# Resource Type data
#
tablename = "org_resource_type"
self.define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("name",
label = T("Resource Type"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_RESOURCE_TYPE = T("Create Resource Type")
crud_strings[tablename] = Storage(
label_create = ADD_RESOURCE_TYPE,
title_display = T("Resource Type Details"),
title_list = T("Resource Types"),
title_update = T("Edit Resource Type"),
title_upload = T("Import Resource Types"),
label_list_button = T("Resource Types"),
label_delete_button = T("Delete Resource Type"),
msg_record_created = T("Resource Type added"),
msg_record_modified = T("Resource Type updated"),
msg_record_deleted = T("Resource Type deleted"),
msg_list_empty = T("No Resource Types defined"))
# Resource Configuration
configure(tablename,
super_entity = "stats_parameter",
)
# ---------------------------------------------------------------------
# Resource data
#
tablename = "org_resource"
self.define_table(tablename,
# Instance
super_link("data_id", "stats_data"),
self.org_organisation_id(ondelete="CASCADE"),
# Add this when deprecating S3OfficeSummaryModel
#self.super_link("site_id", "org_site",
# label=current.deployment_settings.get_org_site_label(),
# instance_types = auth.org_site_types,
# orderby = "org_site.name",
# realms = auth.permission.permitted_realms("org_site",
# method="create"),
# not_filterby = "obsolete",
# not_filter_opts = (True,),
# readable = True,
# writable = True,
# represent = self.org_site_represent,
# ),
self.gis_location_id(),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("parameter_id", "stats_parameter",
label = T("Resource Type"),
instance_types = ("org_resource_type",),
represent = S3Represent(lookup="stats_parameter",
translate=True),
readable = True,
writable = True,
empty = False,
comment = S3AddResourceLink(c="org",
f="resource_type",
vars = dict(child = "parameter_id"),
title=ADD_RESOURCE_TYPE),
),
Field("value", "integer",
label = T("Quantity"),
requires = IS_INT_IN_RANGE(0, 999999),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Resource"),
title_display = T("Resource Details"),
title_list = T("Resource Inventory"),
title_update = T("Edit Resource"),
title_map = T("Map of Resources"),
title_upload = T("Import Resources"),
label_list_button = T("Resource Inventory"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No Resources in Inventory"))
# Filter Widgets
filter_widgets = [S3TextFilter(["organisation_id$name",
"location_id",
"parameter_id$name",
"comments",
],
label = T("Search")),
S3OptionsFilter("parameter_id",
label = T("Type"),
),
]
# Report options
report_fields = ["organisation_id",
"parameter_id",
]
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = [(T("Total Number of Resources"), "sum(value)"),
(T("Number of Resources"), "count(value)"),
],
defaults=Storage(rows = "organisation_id",
cols = "parameter_id",
fact = "sum(value)",
totals = True,
chart = "barchart:rows",
#table = "collapse",
)
)
configure(tablename,
context = {"location": "location_id",
"organisation": "organisation_id",
},
filter_widgets = filter_widgets,
list_layout = org_resource_list_layout,
report_options = report_options,
super_entity = "stats_data",
)
# Pass names back to global scope (s3.*)
return dict()
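        # Illustrative sketch (hypothetical values): a resource record such as
        #
        #     {"organisation_id": 3,
        #      "parameter_id": <id of "Vehicles">,
        #      "value": 12}
        #
        # then rolls up in the report options above as sum(value) per
        # organisation and resource type.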
# =============================================================================
class S3OrganisationSectorModel(S3Model):
"""
Organisation Sector Model
"""
names = ("org_sector",
"org_sector_id",
#"org_subsector",
"org_sector_organisation",
)
def model(self):
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
NONE = current.messages["NONE"]
location = current.session.s3.location_filter
if location:
filterby = "location_id"
filter_opts = (location, None)
else:
filterby = None
filter_opts = (None,)
# ---------------------------------------------------------------------
# Sector
# (Cluster in UN-style terminology)
#
tablename = "org_sector"
define_table(tablename,
Field("name", length=128, notnull=True,
label = T("Name"),
represent = lambda v: T(v) if v is not None \
else NONE,
),
Field("abrv", length=64, #notnull=True,
label = T("Abbreviation"),
),
self.gis_location_id(
requires = IS_EMPTY_OR(IS_LOCATION()),
widget = S3LocationAutocompleteWidget(),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if current.deployment_settings.get_ui_label_cluster():
SECTOR = T("Cluster")
ADD_SECTOR = T("Create Cluster")
help = T("If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.")
crud_strings[tablename] = Storage(
label_create = ADD_SECTOR,
title_display = T("Cluster Details"),
title_list = T("Clusters"),
title_update = T("Edit Cluster"),
label_list_button = T("List Clusters"),
label_delete_button = T("Delete Cluster"),
msg_record_created = T("Cluster added"),
msg_record_modified = T("Cluster updated"),
msg_record_deleted = T("Cluster deleted"),
msg_list_empty = T("No Clusters currently registered"))
else:
SECTOR = T("Sector")
ADD_SECTOR = T("Create Sector")
help = T("If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.")
crud_strings[tablename] = Storage(
label_create = ADD_SECTOR,
title_display = T("Sector Details"),
title_list = T("Sectors"),
title_update = T("Edit Sector"),
label_list_button = T("List Sectors"),
label_delete_button = T("Delete Sector"),
msg_record_created = T("Sector added"),
msg_record_modified = T("Sector updated"),
msg_record_deleted = T("Sector deleted"),
msg_list_empty = T("No Sectors currently registered"))
configure("org_sector",
deduplicate = self.org_sector_duplicate,
onaccept = self.org_sector_onaccept,
)
sector_comment = lambda child: S3AddResourceLink(c="org", f="sector",
vars={"child": child},
label=ADD_SECTOR,
title=SECTOR,
tooltip=help)
represent = S3Represent(lookup=tablename, translate=True)
sector_id = S3ReusableField("sector_id", "reference %s" % tablename,
label = SECTOR,
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_sector.id",
represent,
sort=True,
filterby=filterby,
filter_opts=filter_opts,
)),
sortby = "abrv",
comment = sector_comment("sector_id"),
)
# Components
add_components(tablename,
org_organisation = {"link": "org_sector_organisation",
"joinby": "sector_id",
"key": "organisation_id",
"actuate": "hide",
},
project_project = {"link": "project_sector_project",
"joinby": "sector_id",
"key": "project_id",
"actuate": "hide",
},
#project_activity_type = {"link": "project_activity_type_sector",
# "joinby": "sector_id",
# "key": "activity_type_id",
# "actuate": "hide",
# },
#project_theme = {"link": "project_theme_sector",
# "joinby": "sector_id",
# "key": "theme_id",
# "actuate": "hide",
# },
#org_subsector = "sector_id",
)
# =====================================================================
# (Cluster) Subsector
#
# tablename = "org_subsector"
# define_table(tablename,
# sector_id(),
# Field("name", length=128,
# label = T("Name")),
# Field("abrv", length=64,
# notnull=True, unique=True,
# label = T("Abbreviation")),
# *s3_meta_fields())
##CRUD strings
# if settings.get_ui_label_cluster():
# SUBSECTOR = T("Cluster Subsector")
# crud_strings[tablename] = Storage(
# label_create = T("Create Cluster Subsector"),
# title_display = T("Cluster Subsector Details"),
# title_list = T("Cluster Subsectors"),
# title_update = T("Edit Cluster Subsector"),
# label_list_button = T("List Cluster Subsectors"),
# label_delete_button = T("Delete Cluster Subsector"),
# msg_record_created = T("Cluster Subsector added"),
# msg_record_modified = T("Cluster Subsector updated"),
# msg_record_deleted = T("Cluster Subsector deleted"),
# msg_list_empty = T("No Cluster Subsectors currently registered"))
# else:
# SUBSECTOR = T("Subsector")
# crud_strings[tablename] = Storage(
# label_create = T("Add Subsector"),
# title_display = T("Subsector Details"),
# title_list = T("Subsectors"),
# title_update = T("Edit Subsector"),
# label_list_button = T("List Subsectors"),
# label_delete_button = T("Delete Subsector"),
# msg_record_created = T("Subsector added"),
# msg_record_modified = T("Subsector updated"),
# msg_record_deleted = T("Subsector deleted"),
# msg_list_empty = T("No Subsectors currently registered"))
# subsector_id = S3ReusableField("subsector_id", "reference %s" % tablename,
# label = SUBSECTOR,
# ondelete = "SET NULL",
# represent = self.org_subsector_represent,
# requires = IS_EMPTY_OR(
# IS_ONE_OF(db, "org_subsector.id",
# self.org_subsector_represent,
# sort=True)),
# sortby = "abrv",
# #comment = Script to filter the sector_subsector drop down
# )
# configure("org_subsector",
# deduplicate = self.org_sector_duplicate,
# )
# ---------------------------------------------------------------------
# Organizations <> Sectors Link Table
#
tablename = "org_sector_organisation"
define_table(tablename,
sector_id(),
self.org_organisation_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Sector"),
title_display = T("Sector"),
title_list = T("Sectors"),
title_update = T("Edit Sector"),
title_upload = T("Import Sector data"),
label_list_button = T("List Sectors"),
msg_record_created = T("Sector added to Organization"),
msg_record_modified = T("Sector updated"),
msg_record_deleted = T("Sector removed from Organization"),
msg_list_empty = T("No Sectors found for this Organization"))
configure(tablename,
deduplicate = self.org_sector_organisation_deduplicate,
)
# Pass names back to global scope (s3.*)
return dict(org_sector_id = sector_id,
)
# -------------------------------------------------------------------------
@staticmethod
def org_sector_duplicate(item):
""" Import item de-duplication """
data = item.data
abrv = data.get("abrv")
name = data.get("name")
table = item.table
if abrv:
query = (table.abrv.lower() == abrv.lower())
elif name:
query = (table.name.lower() == name.lower())
else:
return
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def org_sector_onaccept(form):
""" If no abrv is set then set it from the name """
_id = form.vars.id
# Read the record
db = current.db
table = db.org_sector
record = db(table.id == _id).select(table.abrv,
table.name,
limitby=(0, 1)).first()
if not record.abrv:
db(table.id == _id).update(abrv = record.name[:64])
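        # Illustrative sketch (hypothetical values): a sector created as
        #
        #     {"name": "Water, Sanitation & Hygiene", "abrv": None}
        #
        # gets abrv = "Water, Sanitation & Hygiene"[:64] filled in here, so the
        # abbreviation-sorted dropdowns above always have something to sort on.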
# -------------------------------------------------------------------------
#@staticmethod
#def org_subsector_represent(id, row=None):
# """ Subsector ID representation """
# if row:
# return row.name
# elif not id:
# return current.messages["NONE"]
# db = current.db
# table = db.org_subsector
# r = db(table.id == id).select(table.name,
# table.sector_id,
# limitby=(0, 1)
# ).first()
# try:
# sector = db(table.id == r.sector_id).select(table.abrv,
# limitby=(0, 1)
# ).first()
# if sector:
# return "%s: %s" % (sector.abrv, current.T(r.name))
# else:
# return current.T(r.name)
# except:
# return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def org_sector_organisation_deduplicate(item):
""" Import item de-duplication """
data = item.data
organisation_id = data.get("organisation_id")
sector_id = data.get("sector_id")
if organisation_id and sector_id:
table = item.table
query = (table.organisation_id == organisation_id) & \
(table.sector_id == sector_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OrganisationServiceModel(S3Model):
"""
Organisation Service Model
"""
names = ("org_service",
"org_service_organisation",
)
def model(self):
T = current.T
db = current.db
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
hierarchical_service_types = current.deployment_settings.get_org_services_hierarchical()
# ---------------------------------------------------------------------
# Service
#
tablename = "org_service"
define_table(tablename,
Field("name", length=128, notnull=True,
# Comment this if we need to support the same service at different locations in hierarchy
unique = True,
label = T("Name"),
),
Field("parent", "reference org_service", # This form of hierarchy may not work on all Databases
label = T("SubType of"),
ondelete = "RESTRICT",
readable = hierarchical_service_types,
writable = hierarchical_service_types,
),
s3_comments(),
*s3_meta_fields())
represent = S3Represent(lookup = tablename,
# Questionable UX:
#hierarchy = hierarchical_service_types,
translate = True,
)
if hierarchical_service_types:
hierarchy = "parent"
# Can't be defined in-line as otherwise get a circular reference
table = db[tablename]
table.parent.represent = represent
table.parent.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_service.id",
represent,
# If limiting to just 1 level of parent
#filterby="parent",
#filter_opts=(None,),
orderby="org_service.name"))
else:
hierarchy = None
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Service"),
title_display = T("Service Details"),
title_list = T("Services"),
title_update = T("Edit Service"),
title_upload = T("Import Services"),
label_list_button = T("List Services"),
label_delete_button = T("Delete Service"),
msg_record_created = T("Service added"),
msg_record_modified = T("Service updated"),
msg_record_deleted = T("Service deleted"),
msg_list_empty = T("No Services currently registered"))
# Reusable Field
service_id = S3ReusableField("service_id", "reference %s" % tablename,
label = T("Services"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_service.id",
represent,
sort=True)),
sortby = "name",
)
configure(tablename,
# If we need to support the same service at different locations in hierarchy
#deduplicate = self.org_service_deduplicate,
hierarchy = hierarchy,
)
# ---------------------------------------------------------------------
# Organizations <> Services Link Table
#
tablename = "org_service_organisation"
define_table(tablename,
service_id(),
self.org_organisation_id(),
*s3_meta_fields()
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Service"),
title_display = T("Service"),
title_list = T("Services"),
title_update = T("Edit Service"),
label_list_button = T("List Services"),
msg_record_created = T("Service added to Organization"),
msg_record_modified = T("Service updated"),
msg_record_deleted = T("Service removed from Organization"),
msg_list_empty = T("No Services found for this Organization"))
configure(tablename,
deduplicate = self.org_service_organisation_deduplicate,
)
# Pass names back to global scope (s3.*)
return dict()
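        # Illustrative sketch (hypothetical rows): with hierarchical service
        # types enabled, services can nest, e.g.
        #
        #     {"name": "Health"}
        #     {"name": "Primary Health Care", "parent": <id of "Health">}
        #
        # and org_service_organisation then links either level to an
        # organisation.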
# -------------------------------------------------------------------------
@staticmethod
def org_service_deduplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
if name:
table = item.table
query = (table.name == name)
parent = data.get("parent")
if parent:
query &= (table.parent == parent)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def org_service_organisation_deduplicate(item):
""" Import item de-duplication """
data = item.data
organisation_id = data.get("organisation_id")
service_id = data.get("service_id")
if organisation_id and service_id:
table = item.table
query = (table.organisation_id == organisation_id) & \
(table.service_id == service_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OrganisationSummaryModel(S3Model):
"""
Organisation Summary fields visible when settings.org.summary = True
@ToDo: Deprecate in favour of S3OrganisationResourceModel
"""
names = ("org_organisation_summary",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Summary data
#
tablename = "org_organisation_summary"
self.define_table(tablename,
self.org_organisation_id(ondelete="CASCADE"),
Field("national_staff", "integer", # national is a reserved word in Postgres
label = T("# of National Staff"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("international_staff", "integer",
label = T("# of International Staff"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3OrganisationTeamModel(S3Model):
"""
Link table between Organisations & Teams
"""
names = ("org_organisation_team",)
def model(self):
# ---------------------------------------------------------------------
# Link table between Organisations & Teams
#
tablename = "org_organisation_team"
self.define_table(tablename,
self.org_organisation_id(ondelete="CASCADE",
empty=False,
),
self.pr_group_id(ondelete="CASCADE",
empty=False,
),
*s3_meta_fields())
self.configure(tablename,
onaccept = self.org_team_onaccept,
)
# Pass names back to global scope (s3.*)
return dict()
# -------------------------------------------------------------------------
@staticmethod
def org_team_onaccept(form):
"""
Update affiliations
"""
if hasattr(form, "vars"):
_id = form.vars.id
elif isinstance(form, Row) and "id" in form:
_id = form.id
else:
return
if not _id:
return
db = current.db
table = db.org_organisation_team
record = db(table.id == _id).select(table.group_id,
table.organisation_id,
limitby=(0, 1)).first()
if record:
org = ("org_organisation", record.organisation_id)
group = ("pr_group", record.group_id)
current.s3db.pr_add_affiliation(org, group,
role="Groups",
role_type=1) # 1 = OU
# =============================================================================
class S3OrganisationTypeTagModel(S3Model):
"""
Organisation Type Tags
"""
names = ("org_organisation_type_tag",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Organisation Type Tags
# - Key-Value extensions
# - can be used to provide conversions to external systems, such as:
# * HXL
# - can be a Triple Store for Semantic Web support
#
tablename = "org_organisation_type_tag"
self.define_table(tablename,
self.org_organisation_type_id(),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
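        # Illustrative sketch (hypothetical values): a tag row such as
        #
        #     {"organisation_type_id": 5, "tag": "hxl", "value": "#org +type"}
        #
        # lets an exporter translate this organisation type into an external
        # vocabulary such as HXL without hard-coding the mapping.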
# =============================================================================
class S3SiteModel(S3Model):
"""
Site Super-Entity
"""
names = ("org_site",
"org_site_requires",
"org_site_id",
"org_site_represent",
)
def model(self):
T = current.T
auth = current.auth
messages = current.messages
add_components = self.add_components
set_method = self.set_method
# =====================================================================
# Site / Facility (ICS terminology)
#
# Site is a generic type for any facility (office, hospital, shelter,
        # warehouse, project site etc.) and serves the same purpose as pentity
        # does for person entity types: it provides a common join key name
        # across all types of sites, with a unique value for each site. This
        # allows other types that are useful to any sort of site to have a
        # common way to join to any of them. It's especially useful for
        # component types.
#
org_site_types = auth.org_site_types
tablename = "org_site"
self.super_entity(tablename, "site_id", org_site_types,
# @ToDo: Make Sites Trackable (Mobile Hospitals & Warehouses)
#super_link("track_id", "sit_trackable"),
Field("code",
label = T("Code"),
length = 10, # Mayon compatibility
writable = False,
),
Field("name", notnull=True,
length = 64, # Mayon compatibility
#unique=True,
label = T("Name"),
),
self.gis_location_id(),
self.org_organisation_id(),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or
[messages["NONE"]])[0],
readable = False,
writable = False,
),
Field("comments", "text"),
*s3_ownerstamp())
# ---------------------------------------------------------------------
settings = current.deployment_settings
org_site_label = settings.get_org_site_label()
if settings.get_org_site_autocomplete():
            widget = S3SiteAutocompleteWidget()
            comment = DIV(_class="tooltip",
                          _title="%s|%s" % (org_site_label,
                                            messages.AUTOCOMPLETE_HELP))
else:
widget = None
comment = None
org_site_represent = org_SiteRepresent(show_link=True)
site_id = self.super_link("site_id", "org_site",
comment = comment,
#default = auth.user.site_id if auth.is_logged_in() else None,
label = org_site_label,
orderby = "org_site.name",
#readable = True,
represent = org_site_represent,
widget = widget,
#writable = True,
)
# Custom Method for S3SiteAutocompleteWidget
set_method("org", "site",
method = "search_ac",
action = self.site_search_ac)
# Custom Method for S3AddPersonWidget2
# @ToDo: One for HRMs
set_method("org", "site",
method = "site_contact_person",
action = self.site_contact_person)
# Custom Method to Assign HRs
# - done in instances
#set_method("org", "site",
# method = "assign",
# action = self.hrm_AssignMethod(component="human_resource_site"))
self.configure(tablename,
context = {"location": "location_id",
"organisation": "organisation_id",
"org_group": "organisation_id$group_membership.group_id",
},
list_fields = ["id",
"code",
"instance_type",
"name",
"organisation_id",
"location_id",
],
onaccept = self.org_site_onaccept,
ondelete_cascade = self.org_site_ondelete_cascade,
)
# Components
add_components(tablename,
# Facility Types
# Format for S3SQLInlineComponentCheckbox
org_facility_type = {"link": "org_site_facility_type",
"joinby": "site_id",
"key": "facility_type_id",
"actuate": "hide",
},
# Format for filter_widgets & imports
org_site_facility_type = "site_id",
# Human Resources
# - direct component (suitable for Create/List)
hrm_human_resource = "site_id",
# - via link table (suitable for Assign)
hrm_human_resource_site = "site_id",
# Documents
doc_document = "site_id",
doc_image = "site_id",
# Inventory
inv_inv_item = "site_id",
inv_recv = "site_id",
inv_send = "site_id",
# Assets
asset_asset = "site_id",
# Procurement Plans
proc_plan = "site_id",
# Needs
req_site_needs = (# with alias
{"name": "needs",
"joinby": "site_id",
"multiple": False,
},
# without alias
{"joinby": "site_id",
"multiple": False,
},
),
# Requests
req_req = "site_id",
req_commit = "site_id",
# Status
org_site_status = {"name": "status",
"joinby": "site_id",
"multiple": False,
},
# Coalitions
org_group = {"link": "org_site_org_group",
"joinby": "site_id",
"key": "group_id",
"actuate": "hide",
},
# Format for InlineComponent/filter_widget
org_site_org_group = "site_id",
)
# Pass names back to global scope (s3.*)
return dict(org_site_id = site_id,
org_site_represent = org_site_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def org_site_onaccept(form):
"""
Create the code from the name
"""
name = form.vars.name
if not name:
return
code_len = current.deployment_settings.get_org_site_code_len()
temp_code = name[:code_len].upper()
db = current.db
site_table = db.org_site
query = (site_table.code == temp_code)
row = db(query).select(site_table.id,
limitby=(0, 1)).first()
if row:
code = temp_code
temp_code = None
wildcard_bit = 1
length = len(code)
max_wc_bit = pow(2, length)
while not temp_code and wildcard_bit < max_wc_bit:
wildcard_posn = []
for w in range(length):
if wildcard_bit & pow(2, w):
wildcard_posn.append(length - (1 + w))
wildcard_bit += 1
code_list = S3SiteModel.getCodeList(code, wildcard_posn)
temp_code = S3SiteModel.returnUniqueCode(code, wildcard_posn,
code_list)
if temp_code:
db(site_table.site_id == form.vars.site_id).update(code=temp_code)
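        # Illustrative sketch (hypothetical name): with a 10-character code
        # length, a site named "Main Warehouse" first tries code "MAIN WAREH";
        # if that is taken, getCodeList/returnUniqueCode wildcard one position
        # at a time and substitute characters from
        # "1234567890ZQJXKVBWPYGUMCFLDHSIRNOATE" until an unused code such as
        # "MAIN WARE1" is found.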
# -------------------------------------------------------------------------
@staticmethod
def org_site_ondelete_cascade(form):
"""
Update realm entity in all related HRs
@todo: clean up records which RESTRICT the site_id
"""
site_id = form.site_id
htable = current.s3db.hrm_human_resource
query = (htable.site_id == site_id)
db = current.db
rows = db(query).select(htable.id)
db(query).update(site_id = None)
current.auth.set_realm_entity(htable, rows, force_update=True)
# -------------------------------------------------------------------------
@staticmethod
def getCodeList(code, wildcard_posn=[]):
"""
Called by org_site_onaccept
"""
temp_code = ""
        # Insert the wildcard character at the right positions
for posn in range(len(code)):
if posn in wildcard_posn:
temp_code += "%"
else:
temp_code += code[posn]
# Now set up the db call
db = current.db
site_table = db.org_site
query = site_table.code.like(temp_code)
rows = db(query).select(site_table.id,
site_table.code)
# Extract the rows in the database to provide a list of used codes
codeList = []
for record in rows:
codeList.append(record.code)
return codeList
# -------------------------------------------------------------------------
@staticmethod
def returnUniqueCode(code, wildcard_posn=[], code_list=[]):
"""
Called by org_site_onaccept
"""
        # Select replacement characters: digits first, then letters in
        # least-commonly-used order
replacement_char = "1234567890ZQJXKVBWPYGUMCFLDHSIRNOATE"
rep_posn = [0] * len(wildcard_posn)
finished = False
while (not finished):
# Find the next code to try
temp_code = ""
r = 0
for posn in range(len(code)):
if posn in wildcard_posn:
temp_code += replacement_char[rep_posn[r]]
r += 1
else:
temp_code += code[posn]
if temp_code not in code_list:
return temp_code
# Set up the next rep_posn
p = 0
while (p < len(wildcard_posn)):
                if rep_posn[p] == 35: # 35 = highest index into replacement_char
rep_posn[p] = 0
p += 1
else:
rep_posn[p] = rep_posn[p] + 1
break
# If no new permutation of replacement characters has been found
if p == len(wildcard_posn):
return None
# -------------------------------------------------------------------------
@staticmethod
def site_contact_person(r, **attr):
"""
JSON lookup method for S3AddPersonWidget2
"""
site_id = r.id
if not site_id:
output = current.xml.json_message(False, 400, "No id provided!")
raise HTTP(400, body=output)
db = current.db
s3db = current.s3db
ltable = s3db.hrm_human_resource_site
htable = db.hrm_human_resource
query = (ltable.site_id == site_id) & \
(ltable.site_contact == True) & \
(ltable.human_resource_id == htable.id)
person = db(query).select(htable.person_id,
limitby=(0, 1)).first()
if person:
fake = Storage(id = person.person_id,
tablename = "org_site",
)
return s3db.pr_person_lookup(fake, **attr)
else:
current.response.headers["Content-Type"] = "application/json"
output = json.dumps(None, separators=SEPARATORS)
return output
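        # Illustrative sketch (hypothetical URL): S3AddPersonWidget2 requests
        # /org/site/<site_id>/site_contact_person and receives either the
        # pr_person_lookup() details for the flagged site contact or a JSON
        # "null" when no contact is set.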
# -------------------------------------------------------------------------
@staticmethod
def site_search_ac(r, **attr):
"""
JSON search method for S3SiteAutocompleteWidget
@param r: the S3Request
@param attr: request attributes
"""
response = current.response
resource = r.resource
settings = current.deployment_settings
# Query comes in pre-filtered to accessible & deletion_status
# Respect response.s3.filter
resource.add_filter(response.s3.filter)
_vars = current.request.get_vars
# JQueryUI Autocomplete uses "term" instead of "value"
# (old JQuery Autocomplete uses "q" instead of "value")
        value = _vars.term or _vars.value or _vars.q or None
        if not value:
            output = current.xml.json_message(False, 400,
                                              "Missing option! Require value")
            raise HTTP(400, body=output)
        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        value = value.lower().strip()
# Construct query
query = (FS("name").lower().like(value + "%"))
# Add template specific search criteria
extra_fields = settings.get_org_site_autocomplete_fields()
for field in extra_fields:
if "addr_street" in field:
# Need to be able to get through the street number
query |= (FS(field).lower().like("%" + value + "%"))
else:
query |= (FS(field).lower().like(value + "%"))
resource.add_filter(query)
MAX_SEARCH_RESULTS = settings.get_search_max_results()
limit = int(_vars.limit or MAX_SEARCH_RESULTS)
if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
output = json.dumps([
dict(label=str(current.T("There are more than %(max)s results, please input more characters.") % dict(max=MAX_SEARCH_RESULTS)))
], separators=SEPARATORS)
else:
from s3.s3widgets import set_match_strings
s3db = current.s3db
# default fields to return
fields = ["name",
"site_id",
]
# Add template specific fields to return
fields += extra_fields
rows = resource.select(fields,
start=0,
limit=limit,
orderby="name",
as_rows=True)
output = []
append = output.append
for row in rows:
# Populate record
_row = row.get("org_site", row)
record = {"id": _row.site_id,
"name": _row.name,
}
# Populate fields only if present
org = row.get("org_organisation.name", None)
if org:
record["org"] = org
L1 = row.get("gis_location.L1", None)
if L1:
record["L1"] = L1
L2 = row.get("gis_location.L2", None)
if L2:
record["L2"] = L2
L3 = row.get("gis_location.L3", None)
if L3:
record["L3"] = L3
L4 = row.get("gis_location.L4", None)
if L4:
record["L4"] = L4
addr_street = row.get("gis_location.addr_street", None)
if addr_street:
record["addr"] = addr_street
# Populate match information (if applicable)
set_match_strings(record, value)
append(record)
output = json.dumps(output, separators=SEPARATORS)
response.headers["Content-Type"] = "application/json"
return output
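        # Illustrative sketch (hypothetical data and URL): a request like
        # /org/site/search_ac.json?term=main could return
        #
        #     [{"id": 8,
        #       "name": "Main Warehouse",
        #       "org": "Example Org",
        #       "addr": "1 Main Street"}]
        #
        # with match strings added by set_match_strings() for highlighting.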
# =============================================================================
class S3SiteDetailsModel(S3Model):
""" Extra optional details for Sites """
names = ("org_site_status",
"org_site_org_group",
)
def model(self):
T = current.T
define_table = self.define_table
super_link = self.super_link
settings = current.deployment_settings
last_contacted = settings.get_org_site_last_contacted()
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
facility_status_opts = {
1: T("Normal"),
2: T("Compromised"),
3: T("Evacuating"),
4: T("Closed"),
99: T("No Response"),
}
power_supply_type_opts = {
1: T("Grid"),
2: T("Generator"),
98: T("Other"),
99: T("None"),
}
# ---------------------------------------------------------------------
# Site Status
#
tablename = "org_site_status"
define_table(tablename,
# Component not instance
super_link("site_id", "org_site"),
Field("facility_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(facility_status_opts)),
label = T("Facility Status"),
represent = lambda opt: \
NONE if opt is None else \
facility_status_opts.get(opt,
UNKNOWN_OPT)),
s3_date("date_reopening",
label = T("Estimated Reopening Date"),
readable = False,
writable = False,
),
Field("power_supply_type", "integer",
label = T("Power Supply Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(power_supply_type_opts,
zero=None)),
represent = lambda opt: \
NONE if opt is None else \
power_supply_type_opts.get(opt,
UNKNOWN_OPT)),
s3_date("last_contacted",
label = T("Last Contacted"),
readable = last_contacted,
writable = last_contacted,
),
*s3_meta_fields())
# CRUD Strings
site_label = settings.get_org_site_label()
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add %(site_label)s Status") % dict(site_label=site_label),
title_display = T("%(site_label)s Status") % dict(site_label=site_label),
title_list = T("%(site_label)s Status") % dict(site_label=site_label),
title_update = T("Edit %(site_label)s Status") % dict(site_label=site_label),
label_list_button = T("List %(site_label)s Status") % dict(site_label=site_label),
msg_record_created = T("%(site_label)s Status added") % dict(site_label=site_label),
msg_record_modified = T("%(site_label)s Status updated") % dict(site_label=site_label),
msg_record_deleted = T("%(site_label)s Status deleted") % dict(site_label=site_label),
msg_list_empty = T("There is no status for this %(site_label)s yet. Add %(site_label)s Status.") % dict(site_label=site_label),
)
# ---------------------------------------------------------------------
# Sites <> Coalitions link table
#
tablename = "org_site_org_group"
define_table(tablename,
# Component not instance
super_link("site_id", "org_site"),
self.org_group_id(empty=False),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3FacilityModel(S3Model):
"""
Generic Site
"""
names = ("org_facility_type",
"org_facility",
"org_site_facility_type",
"org_facility_type_id", # Passed to global for s3translate
"org_facility_geojson",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
NONE = current.messages["NONE"]
hierarchical_facility_types = settings.get_org_facility_types_hierarchical()
# ---------------------------------------------------------------------
# Facility Types (generic)
#
tablename = "org_facility_type"
define_table(tablename,
Field("name",
label = T("Name"),
),
Field("parent", "reference org_facility_type", # This form of hierarchy may not work on all Databases
label = T("SubType of"),
ondelete = "RESTRICT",
readable = hierarchical_facility_types,
writable = hierarchical_facility_types,
),
s3_comments(),
*s3_meta_fields()
)
type_represent = S3Represent(lookup = tablename,
# Questionable UX:
#hierarchy = hierarchical_facility_types,
translate = True,
)
if hierarchical_facility_types:
hierarchy = "parent"
# Can't be defined in-line as otherwise get a circular reference
table = db[tablename]
table.parent.represent = type_represent
table.parent.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_facility_type.id",
type_represent,
# If limiting to just 1 level of parent
#filterby="parent",
#filter_opts=(None,),
orderby="org_facility_type.name"))
list_fields = [(T("Type"), "parent"),
#(T("SubType"), "name"),
"name",
"comments",
]
else:
hierarchy = None
list_fields = ["name",
"comments",
]
# CRUD strings
# @ToDo: Flexible Labelling: 'Facility, 'Place', 'Site'
ADD_FAC = T("Create Facility Type")
crud_strings[tablename] = Storage(
label_create = ADD_FAC,
title_display = T("Facility Type Details"),
title_list = T("Facility Types"),
title_update = T("Edit Facility Type"),
title_upload = T("Import Facility Types"),
label_list_button = T("List Facility Types"),
label_delete_button = T("Delete Facility Type"),
msg_record_created = T("Facility Type added"),
msg_record_modified = T("Facility Type updated"),
msg_record_deleted = T("Facility Type deleted"),
msg_list_empty = T("No Facility Types currently registered"))
facility_type_id = S3ReusableField("facility_type_id",
"reference %s" % tablename,
label = T("Facility Type"),
ondelete = "CASCADE",
represent = type_represent,
# Only used by org_site_facility_type
requires = IS_ONE_OF(db, "org_facility_type.id",
type_represent,
sort = True,
),
sortby = "name",
comment = S3AddResourceLink(c = "org",
f = "facility_type",
label = ADD_FAC,
title = T("Facility Type"),
tooltip = T("If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.")),
)
configure(tablename,
deduplicate = self.org_facility_type_duplicate,
hierarchy = hierarchy,
list_fields = list_fields,
)
# ---------------------------------------------------------------------
# Facilities (generic)
#
if settings.get_org_facility_code_unique():
code_requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "org_facility.code"))
else:
code_requires = None
tablename = "org_facility"
define_table(tablename,
# Instance
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length = 64, # Mayon Compatibility
label = T("Name"),
),
Field("code", length=10, # Mayon compatibility
#notnull=True,
label = T("Code"),
# Deployments that don't want office codes can hide them
#readable=False, writable=False,
represent = lambda v: v or NONE,
requires = code_requires,
),
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
self.gis_location_id(),
Field("opening_times",
label = T("Opening Times"),
represent = lambda v: v or NONE,
),
Field("contact",
label = T("Contact"),
represent = lambda v: v or NONE,
),
Field("phone1",
label = T("Phone 1"),
represent = lambda v: v or NONE,
requires=IS_EMPTY_OR(s3_phone_requires),
),
Field("phone2",
label = T("Phone 2"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("email",
label = T("Email"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("website",
label = T("Website"),
represent = lambda v: v or NONE,
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [current.messages["NONE"]])[0],
readable = False,
writable = False,
),
Field.Method("inv", org_site_has_inv),
Field.Method("assets", org_site_has_assets),
Field.Method("reqs", org_site_top_req_priority),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_FAC = T("Create Facility")
crud_strings[tablename] = Storage(
label_create = ADD_FAC,
title_display = T("Facility Details"),
title_list = T("Facilities"),
title_update = T("Edit Facility"),
title_map = T("Map of Facilities"),
title_upload = T("Import Facilities"),
label_list_button = T("List Facilities"),
label_delete_button = T("Delete Facility"),
msg_record_created = T("Facility added"),
msg_record_modified = T("Facility updated"),
msg_record_deleted = T("Facility deleted"),
msg_list_empty = T("No Facilities currently registered"))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
report_fields = ["name",
"site_facility_type.facility_type_id",
"organisation_id",
]
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
if hierarchical_facility_types:
type_filter = S3HierarchyFilter("site_facility_type.facility_type_id",
label = T("Type"),
)
else:
type_filter = S3OptionsFilter("site_facility_type.facility_type_id",
# @ToDo: Introspect need for header based on # records
#header = True,
label = T("Type"),
# Doesn't support translation
#represent = "%(name)s",
)
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
#_class = "filter-search",
),
type_filter,
S3OptionsFilter("organisation_id",
# @ToDo: Introspect need for header based on # records
#header = True,
label = T("Organization"),
represent = "%(name)s",
),
S3LocationFilter("location_id",
# @ToDo: Display by default in Summary Views but not others?
#hidden = True,
label = T("Location"),
levels = levels,
),
]
groups = settings.get_org_groups()
if groups:
report_fields.append("site_org_group.group_id")
filter_widgets.insert(1,
S3OptionsFilter("site_org_group.group_id",
# @ToDo: Introspect need for header based on # records
#header = True,
represent = "%(name)s",
))
if settings.get_org_regions():
report_fields.append("organisation_id$region_id")
if settings.get_org_regions_hierarchical():
filter_widget = S3HierarchyFilter("organisation_id$region_id",
#hidden = True,
label = T("Region"),
)
else:
filter_widget = S3OptionsFilter("organisation_id$region_id",
#hidden = True,
label = T("Region"),
)
filter_widgets.insert(1, filter_widget)
if settings.has_module("inv"):
report_fields.append((T("Inventory"), "inv"))
filter_widgets.append(
S3OptionsFilter("inv",
label = T("Inventory"),
options = {True: T("Yes"),
False: T("No"),
},
cols = 2,
))
if settings.has_module("asset"):
report_fields.append((T("Assets"), "assets"))
filter_widgets.append(
S3OptionsFilter("assets",
label = T("Assets"),
options = {True: T("Yes"),
False: T("No"),
},
cols = 2,
))
if settings.has_module("req"):
# @ToDo: Report should show Total Open/Closed Requests
report_fields.append((T("Highest Priority Open Requests"), "reqs"))
filter_widgets.append(
S3OptionsFilter("reqs",
label = T("Highest Priority Open Requests"),
options = self.req_priority_opts,
cols = 3,
))
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = [(T("Number of Facilities"), "count(id)"),
(T("List of Facilities"), "list(name)"),
],
defaults = Storage(rows = lfield, # Lowest-level of hierarchy
cols = "site_facility_type.facility_type_id",
fact = "count(id)",
totals = True,
),
)
# Custom Form
if hierarchical_facility_types:
type_widget = "hierarchy"
else:
type_widget = "groupedopts"
crud_form = S3SQLCustomForm("name",
"code",
S3SQLInlineLink(
"facility_type",
label = T("Facility Type"),
field = "facility_type_id",
widget = type_widget,
cols = 3,
),
"organisation_id",
"location_id",
"opening_times",
"contact",
"phone1",
"phone2",
"email",
"website",
#S3SQLInlineComponent(
# "status",
# label = T("Status"),
# fields = ["last_contacted"],
# multiple = False,
#),
"obsolete",
"comments",
)
list_fields = ["name",
"code",
"site_facility_type.facility_type_id",
"organisation_id",
"location_id",
"opening_times",
"contact",
"phone1",
"phone2",
"email",
"website",
"comments",
]
configure(tablename,
context = {"location": "location_id",
"organisation": "organisation_id",
"org_group": "organisation_id$group_membership.group_id",
"request": "req.id",
},
crud_form = crud_form,
deduplicate = self.org_facility_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.org_facility_onaccept,
realm_components = ("contact_emergency",
"physical_description",
"config",
"image",
"req",
"send",
"human_resource_site",
"note",
"contact",
"role",
"asset",
"commit",
"inv_item",
"document",
"recv",
"address",
),
report_options = report_options,
summary = [{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "report",
"label": "Report",
"widgets": [{"method": "report",
"ajax_init": True}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map",
"ajax_init": True}],
},
],
super_entity = ("doc_entity", "org_site", "pr_pentity"),
update_realm = True,
)
# Custom Method to Assign HRs
self.set_method("org", "facility",
method = "assign",
action = self.hrm_AssignMethod(component="human_resource_site"))
# ---------------------------------------------------------------------
# Link Table: Sites <> Facility Types
# - currently just used for Facilities but can be easily used by other
# Site types as required
#
tablename = "org_site_facility_type"
define_table(tablename,
# Component not instance
super_link("site_id", "org_site",
instance_types = current.auth.org_site_types,
label = settings.get_org_site_label(),
orderby = "org_site.name",
represent = self.org_site_represent,
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
),
facility_type_id(),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict(org_facility_type_id = facility_type_id,
org_facility_geojson = self.org_facility_geojson,
)
# -------------------------------------------------------------------------
@staticmethod
def org_facility_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
org_update_affiliations("org_facility", form.vars)
# -------------------------------------------------------------------------
@staticmethod
def org_facility_duplicate(item):
""" Import item de-duplication """
data = item.data
name = data.get("name")
org = data.get("organisation_id")
address = data.get("address")
table = item.table
query = (table.name.lower() == name.lower())
if org:
query = query & (table.organisation_id == org)
if address:
query = query & (table.address == address)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# ---------------------------------------------------------------------
@staticmethod
def org_facility_type_duplicate(item):
"""
Deduplication of Facility Types
"""
name = item.data.get("name")
if not name:
return
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -----------------------------------------------------------------------------
@staticmethod
def org_facility_geojson(jsonp=True,
decimals=4):
"""
Produce a static GeoJSON[P] feed of Facility data
Designed to be run on a schedule to serve a high-volume website
"""
from shapely.geometry import Point
from ..geojson import dumps
db = current.db
s3db = current.s3db
stable = s3db.org_facility
ltable = db.org_site_facility_type
ttable = db.org_facility_type
gtable = db.gis_location
ntable = s3db.req_site_needs
# Limit the number of decimal places
formatter = ".%sf" % decimals
# All Facilities
query = (stable.deleted != True) & \
(stable.obsolete != True) & \
(gtable.id == stable.location_id)
lquery = (ntable.deleted != True) & \
(ntable.site_id == stable.site_id)
left = [ntable.on(lquery),
ltable.on(stable.site_id == ltable.site_id),
ttable.on(ttable.facility_type_id == ltable.facility_type_id),
]
facs = db(query).select(stable.id,
stable.name,
ttable.name,
stable.comments,
stable.opening_times,
stable.phone1,
stable.phone2,
stable.email,
stable.website,
ntable.needs,
gtable.addr_street,
gtable.L1,
gtable.L4,
gtable.lat,
gtable.lon,
left=left,
)
features = []
append = features.append
for f in facs:
g = f.gis_location
x = g.lon
y = g.lat
if x is None or y is None:
continue
x = float(format(x, formatter))
y = float(format(y, formatter))
shape = Point(x, y)
# Compact Encoding
geojson = dumps(shape, separators=SEPARATORS)
o = f.org_facility
properties = {"id": o.id,
"name": o.name,
}
if f.get("org_facility_type.name"):
properties["type"] = f["org_facility_type.name"]
if o.opening_times:
properties["open"] = o.opening_times
if o.comments:
properties["comments"] = o.comments
if g.addr_street:
properties["addr"] = g.addr_street
if g.L1:
# Encode smaller if possible
L1 = g.L1
if L1 == "New York":
properties["L1"] = "NY"
elif L1 == "New Jersey":
properties["L1"] = "NJ"
else:
properties["L1"] = L1
if g.L4:
properties["L4"] = g.L4
if o.phone1:
properties["ph1"] = o.phone1
if o.phone2:
properties["ph2"] = o.phone2
if o.email:
properties["email"] = o.email
if o.website:
properties["web"] = o.website
n = f.req_site_needs
if n:
if n.needs:
needs = json.loads(n.needs)
if "urgent" in needs:
properties["urgent"] = needs["urgent"]
if "need" in needs:
properties["need"] = needs["need"]
if "no" in needs:
properties["no"] = needs["no"]
f = dict(type = "Feature",
properties = properties,
geometry = json.loads(geojson)
)
append(f)
data = dict(type = "FeatureCollection",
features = features
)
output = json.dumps(data, separators=SEPARATORS)
if jsonp:
filename = "facility.geojsonp"
output = "grid(%s)" % output
else:
filename = "facility.geojson"
path = os.path.join(current.request.folder,
"static", "cache",
filename)
with open(path, "w") as outfile:
    outfile.write(output)
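# Illustrative usage (a sketch, not part of the upstream module): the method is
# returned to s3db in model() above, so a scheduled task or a console session
# inside the web2py environment can refresh the static file directly:
#
# current.s3db.org_facility_geojson(jsonp=True)
# # -> writes <application>/static/cache/facility.geojsonp, wrapped as grid(...)
# current.s3db.org_facility_geojson(jsonp=False, decimals=5)
# # -> writes <application>/static/cache/facility.geojson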
# -----------------------------------------------------------------------------
def org_facility_rheader(r, tabs=[]):
"""
RHeader for facilities when doing a req_match
"""
T = current.T
s3db = current.s3db
# Need to use this format as otherwise /inv/incoming?viewing=org_office.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
r.record = record
r.table = s3db[tablename]
tabs = [(T("Details"), None)]
try:
tabs = tabs + s3db.req_tabs(r)
except:
pass
try:
tabs = tabs + s3db.inv_tabs(r)
except:
pass
rheader_fields = [["name"], ["location_id"]]
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
return rheader
# =============================================================================
class S3RoomModel(S3Model):
"""
Rooms are a location within a Site
- used by Asset module
"""
names = ("org_room",
"org_room_id",
)
def model(self):
T = current.T
db = current.db
# ---------------------------------------------------------------------
# Rooms (for Sites)
# @ToDo: Validate to ensure that rooms are unique per facility
#
tablename = "org_room"
self.define_table(tablename,
self.org_site_id, # site_id
Field("name", length=128, notnull=True,
label = T("Name"),
),
*s3_meta_fields())
# CRUD strings
ADD_ROOM = T("Create Room")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_ROOM,
title_display = T("Room Details"),
title_list = T("Rooms"),
title_update = T("Edit Room"),
label_list_button = T("List Rooms"),
label_delete_button = T("Delete Room"),
msg_record_created = T("Room added"),
msg_record_modified = T("Room updated"),
msg_record_deleted = T("Room deleted"),
msg_list_empty = T("No Rooms currently registered"))
room_comment = DIV(
S3AddResourceLink(c="org",
f="room",
label=ADD_ROOM,
tooltip=T("Select a Room from the list or click 'Create Room'")),
# Filters Room based on site
SCRIPT(
'''$.filterOptionsS3({
'trigger':'site_id',
'target':'room_id',
'lookupPrefix':'org',
'lookupResource':'room'
})''')
)
# Reusable field for other tables to reference
represent = S3Represent(lookup=tablename)
room_id = S3ReusableField("room_id", "reference %s" % tablename,
label = T("Room"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_room.id",
represent
)),
sortby = "name",
comment = room_comment,
)
self.configure(tablename,
deduplicate = self.org_room_duplicate,
)
# Pass names back to global scope (s3.*)
return dict(org_room_id = room_id,
)
# -------------------------------------------------------------------------
@staticmethod
def org_room_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name")
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OfficeModel(S3Model):
names = ("org_office",
"org_office_type",
"org_office_type_id",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
messages = current.messages
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Office Types
#
tablename = "org_office_type"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Name"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_OFFICE_TYPE = T("Create Office Type")
crud_strings[tablename] = Storage(
label_create = ADD_OFFICE_TYPE,
title_display = T("Office Type Details"),
title_list = T("Office Types"),
title_update = T("Edit Office Type"),
label_list_button = T("List Office Types"),
label_delete_button = T("Delete Office Type"),
msg_record_created = T("Office Type added"),
msg_record_modified = T("Office Type updated"),
msg_record_deleted = T("Office Type deleted"),
msg_list_empty = T("No Office Types currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
office_type_id = S3ReusableField("office_type_id", "reference %s" % tablename,
label = T("Office Type"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_office_type.id",
represent,
sort=True
)),
sortby = "name",
comment = S3AddResourceLink(c="org",
f="office_type",
label=ADD_OFFICE_TYPE,
title=T("Office Type"),
tooltip=T("If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.")),
)
configure(tablename,
deduplicate = self.office_type_duplicate,
)
# Components
add_components(tablename,
# Tags
org_office_type_tag = {"name": "tag",
"joinby": "office_type_id",
},
)
# ---------------------------------------------------------------------
# Offices
#
if settings.get_org_office_code_unique():
code_requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, "org_office.code"))
else:
code_requires = None
tablename = "org_office"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
# Deployments that don't want office codes can hide them
#readable=False,
#writable=False,
requires = code_requires,
),
self.org_organisation_id(
requires = org_organisation_requires(required=True,
updateable=True),
),
office_type_id(
#readable = False,
#writable = False,
),
self.gis_location_id(),
Field("phone1",
label = T("Phone 1"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("phone2",
label = T("Phone 2"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("email",
label = T("Email"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("fax",
label = T("Fax"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [messages["NONE"]])[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Office"),
title_display = T("Office Details"),
title_list = T("Offices"),
title_update = T("Edit Office"),
title_upload = T("Import Offices"),
title_map = T("Map of Offices"),
label_list_button = T("List Offices"),
label_delete_button = T("Delete Office"),
msg_record_created = T("Office added"),
msg_record_modified = T("Office updated"),
msg_record_deleted = T("Office deleted"),
msg_list_empty = T("No Offices currently registered"))
if settings.get_org_branches():
ORGANISATION = T("Organization/Branch")
comment = T("Search for office by organization or branch.")
org_filter = S3HierarchyFilter("organisation_id",
label = ORGANISATION,
comment = comment,
#hidden = True,
)
else:
ORGANISATION = T("Organization")
comment = T("Search for office by organization.")
org_filter = S3OptionsFilter("organisation_id",
label = ORGANISATION,
comment = comment,
represent = "%(name)s",
#hidden = True,
)
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
list_fields = ["id",
"name",
"organisation_id", # Filtered in Component views
"office_type_id",
]
for level in levels:
lfield = "location_id$%s" % level
text_fields.append(lfield)
list_fields += [(T("Address"), "location_id$addr_street"),
"phone1",
"email",
]
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
#_class = "filter-search",
),
#S3OptionsFilter("office_type_id",
# label = T("Type"),
# #hidden = True,
# ),
org_filter,
S3LocationFilter("location_id",
label = T("Location"),
levels = levels,
#hidden = True,
),
]
configure(tablename,
context = {"location": "location_id",
"organisation": "organisation_id",
"org_group": "organisation_id$group_membership.group_id",
},
deduplicate = self.org_office_duplicate,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.org_office_onaccept,
realm_components = ("contact_emergency",
"config",
"image",
"req",
"send",
"human_resource_site",
"note",
"contact",
"role",
"asset",
"commit",
"inv_item",
"document",
"recv",
"address",
),
super_entity = ("doc_entity", "pr_pentity", "org_site"),
update_realm = True,
)
if settings.get_org_summary():
add_components(tablename,
org_office_summary = {"name": "summary",
"joinby": "office_id",
},
)
# Pass names back to global scope (s3.*)
return dict(org_office_type_id = office_type_id,
)
# -------------------------------------------------------------------------
@staticmethod
def office_type_duplicate(item):
""" Import item de-duplication """
name = item.data.get("name")
table = item.table
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# ---------------------------------------------------------------------
@staticmethod
def org_office_onaccept(form):
"""
* Update Affiliation and Realms
* Process injected fields
"""
form_vars = form.vars
# Affiliation, record ownership and component ownership
org_update_affiliations("org_office", form_vars)
if current.deployment_settings.get_org_summary():
db = current.db
id = form_vars.id
table = current.s3db.org_office_summary
query = (table.office_id == id)
existing = db(query).select(table.id,
limitby=(0, 1)).first()
post_vars = current.request.post_vars
national_staff = post_vars.get("national_staff", None)
international_staff = post_vars.get("international_staff", None)
if existing:
db(query).update(national_staff = national_staff,
international_staff = international_staff
)
elif national_staff or international_staff:
table.insert(office_id = id,
national_staff = national_staff,
international_staff = international_staff
)
# ---------------------------------------------------------------------
@staticmethod
def org_office_duplicate(item):
"""
Import item deduplication:
- match by name
- match org, if defined
(Adding location_id doesn't seem to be a good idea)
@param item: the S3ImportItem instance
"""
data = item.data
name = data.get("name")
if not name:
return
table = item.table
query = (table.name.lower() == name.lower())
#location_id = data.get("location_id")
#if location_id:
# # This doesn't find deleted records:
# query = query & (table.location_id == location_id)
org = data.get("organisation_id")
if org:
query &= (table.organisation_id == org)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
# if duplicate is None and location_id:
## Search for deleted offices with this name
# query = (table.name.lower() == name.lower()) & \
# (table.deleted == True)
# row = db(query).select(table.id, table.deleted_fk,
# limitby=(0, 1)).first()
# if row:
# fkeys = json.loads(row.deleted_fk)
# if "location_id" in fkeys and \
# str(fkeys["location_id"]) == str(location_id):
# duplicate = row
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3OfficeSummaryModel(S3Model):
"""
Office Summary fields visible when settings.org.summary = True
@ToDo: Deprecate in favour of S3OrganisationResourceModel
"""
names = ("org_office_summary",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Summary data
#
tablename = "org_office_summary"
self.define_table(tablename,
Field("office_id",
label = T("Office"),
ondelete = "CASCADE",
requires = IS_ONE_OF(current.db, "org_office.id",
"%(name)s"),
),
Field("national_staff", "integer", # national is a reserved word in Postgres
label = T("# of National Staff"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
Field("international_staff", "integer",
label = T("# of International Staff"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 9999)),
),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3OfficeTypeTagModel(S3Model):
"""
Office Type Tags
"""
names = ("org_office_type_tag",)
def model(self):
T = current.T
# ---------------------------------------------------------------------
# Office Type Tags
# - Key-Value extensions
# - can be used to provide conversions to external systems, such as:
# * HXL
# - can be a Triple Store for Semantic Web support
#
tablename = "org_office_type_tag"
self.define_table(tablename,
self.org_office_type_id(),
# key is a reserved word in MySQL
Field("tag",
label = T("Key"),
),
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
def org_organisation_address(row):
""" The address of the first office """
if hasattr(row, "org_organisation"):
row = row.org_organisation
try:
organisation_id = row.id
except:
# not available
return current.messages["NONE"]
db = current.db
s3db = current.s3db
otable = s3db.org_office
gtable = s3db.gis_location
query = (otable.deleted != True) & \
(otable.organisation_id == organisation_id) & \
(otable.location_id == gtable.id)
row = db(query).select(gtable.addr_street, limitby=(0, 1)).first()
if row:
return row.addr_street
else:
return current.messages["NONE"]
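# Illustrative usage (assumption, not from the upstream module): the helper
# accepts either an org_organisation Row or a joined Row containing one, e.g.:
#
# otable = current.s3db.org_organisation
# org = current.db(otable.id > 0).select(otable.id, limitby=(0, 1)).first()
# if org:
#     print(org_organisation_address(org))   # first office address, or NONE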
# =============================================================================
def org_organisation_logo(id,
#type="png",
):
"""
Return a logo of the organisation with the given id, if one exists
The id can either be the id of the organisation
or a Row of the organisation
@ToDo: The type can either be png or bmp and is the format of the saved image
"""
if not id:
return None
s3db = current.s3db
if isinstance(id, Row):
# Do not repeat the lookup if already done by IS_ONE_OF or RHeader
record = id
else:
table = s3db.org_organisation
record = current.db(table.id == id).select(table.name,
table.acronym,
table.logo,
limitby=(0, 1)).first()
if record and record.logo:
#format = None
#if type == "bmp":
# format = "bmp"
size = (None, 60)
image = s3db.pr_image_represent(record.logo, size=size)
url_small = URL(c="default", f="download", args=image)
if record.acronym is None or record.acronym == "":
alt = "%s logo" % record.name
else:
alt = "%s logo" % record.acronym
logo = IMG(_src=url_small,
_alt=alt,
_height=60,
)
return logo
return DIV() # no logo so return an empty div
# =============================================================================
def org_parents(organisation_id, path=None):
"""
Lookup the parent organisations of a branch organisation
@param organisation_id: the organisation's record ID
@return: list of ids of the parent organisations, root organisation first (the immediate parent is the last element)
"""
if path is None:
    path = []
if not organisation_id:
    return path
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
btable = s3db.org_organisation.with_alias("org_branch_organisation")
ltable = s3db.org_organisation_branch
query = (btable.id == organisation_id)
join = (ltable.deleted != True) & \
(btable.deleted != True) & \
(otable.deleted != True) & \
(btable.id == ltable.branch_id) & \
(otable.id == ltable.organisation_id)
row = db(query & join).select(otable.id,
limitby=(0, 1)).first()
if row is not None:
# Parent exists
organisation_id = row.id
path.insert(0, organisation_id)
return org_parents(organisation_id, path)
else:
# This is the root org
return path
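# Illustrative usage (assumption, not from the upstream module): resolving the
# ancestry of a hypothetical branch record ID:
#
# ancestors = org_parents(branch_id)        # branch_id is hypothetical
# # -> e.g. [root_id, intermediate_id, immediate_parent_id], or [] if the
# #    organisation has no parent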
# =============================================================================
def org_root_organisation(organisation_id):
"""
Lookup the root organisation of a branch organisation
@param organisation_id: the organisation's record ID
@return: id of the root organisation,
or None if no root organisation can be found
"""
if not organisation_id:
return None
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
btable = s3db.org_organisation.with_alias("org_branch_organisation")
ltable = s3db.org_organisation_branch
query = (btable.id == organisation_id)
join = (ltable.deleted != True) & \
(btable.deleted != True) & \
(otable.deleted != True) & \
(btable.id == ltable.branch_id) & \
(otable.id == ltable.organisation_id)
row = db(query & join).select(otable.id,
limitby=(0, 1)).first()
if row is not None:
# Parent exists
return org_root_organisation(row.id)
else:
# This is the root org
return organisation_id
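# Illustrative usage (assumption, not from the upstream module):
#
# root_id = org_root_organisation(branch_id)    # branch_id is hypothetical
# # -> the id at the top of the branch hierarchy; for an organisation without
# #    a parent this is simply the id that was passed in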
# =============================================================================
def org_root_organisation_name(organisation_id):
"""
Lookup the root organisation name of a branch organisation
@param organisation_id: the organisation's record ID
@return: name of the root organisation,
or None if no root organisation can be found
"""
if not organisation_id:
return None
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
btable = s3db.org_organisation.with_alias("org_branch_organisation")
ltable = s3db.org_organisation_branch
query = (btable.id == organisation_id)
join = (ltable.deleted != True) & \
(btable.deleted != True) & \
(otable.deleted != True) & \
(btable.id == ltable.branch_id) & \
(otable.id == ltable.organisation_id)
row = db(query & join).select(otable.id,
limitby=(0, 1)).first()
if row is not None:
# Parent exists
return org_root_organisation_name(row.id)
else:
# This is the root org
row = db(otable.id == organisation_id).select(otable.name,
limitby=(0, 1)).first()
if row:
return row.name
# =============================================================================
def org_organisation_requires(required = False,
realms = None,
updateable = False
):
"""
@param required: Whether the selection is optional or mandatory
@param realms: Whether the list should be filtered to just those
belonging to a list of realm entities
@param updateable: Whether the list should be filtered to just those
which the user has Write access to
"""
requires = IS_ONE_OF(current.db, "org_organisation.id",
org_OrganisationRepresent(),
realms = realms,
updateable = updateable,
orderby = "org_organisation.name",
sort = True)
if not required:
requires = IS_EMPTY_OR(requires)
return requires
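# Illustrative usage (same pattern as used for org_office further up in this
# module): restrict an organisation_id selector to organisations the current
# user is allowed to update:
#
# field = current.s3db.some_table.organisation_id    # some_table is hypothetical
# field.requires = org_organisation_requires(required=True, updateable=True)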
# =============================================================================
def org_region_options(zones=False):
"""
Get all options for region IDs
@param zones: select only zones if True, otherwise only regions
@return: dict of {org_region.id: representation}
"""
represent = current.s3db.org_region_represent
if represent is None:
return dict()
db = current.db
rtable = db.org_region
if zones:
query = (rtable.parent == None)
else:
query = (rtable.parent != None)
query &= (rtable.deleted != True)
rows = db(query).select(rtable.id, rtable.name)
options = represent.bulk(None, rows=rows)
options.pop(None, None) # Remove the None options
return options
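# Illustrative usage (assumption, not from the upstream module): the returned
# dict can serve as a fixed options list for a selector or filter widget:
#
# zone_opts = org_region_options(zones=True)    # {region_id: label, ...} for zones
# region_opts = org_region_options()            # regions (i.e. rows with a parent)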
# =============================================================================
class org_OrganisationRepresent(S3Represent):
""" Representation of Organisations """
def __init__(self,
show_link=False,
parent=True,
acronym=True,
multiple=False,
skip_dt_orderby=False,
):
self.acronym = acronym
settings = current.deployment_settings
# Translation uses org_organisation_name & not T()
translate = settings.get_L10n_translate_org_organisation()
if translate:
language = current.session.s3.language
if language == current.deployment_settings.get_L10n_default_language():
translate = False
if skip_dt_orderby:
# org/branch component which doesn't like the left join
self.skip_dt_orderby = True
if parent and settings.get_org_branches():
# Need a custom lookup
self.lookup_rows = self.custom_lookup_rows
self.parent = True
if translate:
fields = ["org_organisation.name",
"org_organisation.acronym",
"org_parent_organisation.name",
"org_organisation_name.name_l10n",
"org_organisation_name.acronym_l10n",
"org_parent_organisation_name.name_l10n",
]
else:
fields = ["org_organisation.name",
"org_organisation.acronym",
"org_parent_organisation.name",
]
elif translate:
# Need a custom lookup
self.lookup_rows = self.custom_lookup_rows
self.parent = False
fields = ["org_organisation.name",
"org_organisation.acronym",
"org_organisation_name.name_l10n",
"org_organisation_name.acronym_l10n",
]
else:
# Can use standard lookup of fields
self.parent = False
fields = ["name",
"acronym",
]
super(org_OrganisationRepresent,
self).__init__(lookup="org_organisation",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for organisation rows, does a
left join with the parent organisation. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the organisation IDs
"""
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
fields = [otable.id,
otable.name,
otable.acronym,
]
if self.parent:
btable = s3db.org_organisation_branch
ptable = db.org_organisation.with_alias("org_parent_organisation")
fields.append(ptable.name)
left = [btable.on(btable.branch_id == otable.id),
ptable.on(ptable.id == btable.organisation_id),
]
if self.translate:
ltable = s3db.org_organisation_name
fields += [ltable.name_l10n,
ltable.acronym_l10n,
]
if self.parent:
lptable = db.org_organisation_name.with_alias("org_parent_organisation_name")
fields.append(lptable.name_l10n)
left += [ltable.on(ltable.organisation_id == otable.id),
lptable.on(lptable.organisation_id == btable.organisation_id),
]
else:
left = [ltable.on(ltable.organisation_id == otable.id),
]
qty = len(values)
if qty == 1:
query = (otable.id == values[0])
limitby = (0, 1)
else:
query = (otable.id.belongs(values))
limitby = (0, qty)
rows = db(query).select(left=left,
limitby=limitby,
*fields)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the org_organisation Row
"""
if self.translate:
# Custom Row (with the name_l10n left-joined)
name = row["org_organisation_name.name_l10n"] or row["org_organisation.name"]
acronym = row["org_organisation_name.acronym_l10n"] or row["org_organisation.acronym"]
if self.parent:
parent = row["org_parent_organisation_name.name_l10n"] or row["org_parent_organisation.name"]
else:
if self.parent:
# Custom Row (with the parent left-joined)
name = row["org_organisation.name"]
acronym = row["org_organisation.acronym"]
parent = row["org_parent_organisation.name"]
else:
# Standard row (from fields)
name = row["name"]
acronym = row["acronym"]
if not name:
return self.default
if self.acronym and acronym:
name = "%s (%s)" % (name, acronym)
if self.parent and parent:
name = "%s > %s" % (parent, name)
return s3_unicode(name)
# -------------------------------------------------------------------------
def dt_orderby(self, field, direction, orderby, left):
"""
Custom orderby logic for datatables
@ToDo: Support for self.translate = True
need to handle the inevitable NULL values which vary in
order by DB, although perhaps DB handling doesn't matter
here.
"""
otable = current.s3db.org_organisation
left.add(otable.on(field == otable.id))
if self.parent:
# If we use a hierarchical representation, order by root
# organisation name first because it appears before the
# branch name:
rotable = otable.with_alias("org_root_organisation")
left.add(rotable.on(otable.root_organisation == rotable.id))
orderby.extend(["org_root_organisation.name%s" % direction,
"org_organisation.name%s" % direction,
])
#elif self.translate:
# # Order by translated name
# orderby.append("org_organisation_name.name_l10n%s" % direction)
else:
# Otherwise: order by organisation name
# e.g. the branches component view
orderby.append("org_organisation.name%s" % direction)
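# Illustrative usage (pattern as used in org_organisation_controller below):
# instantiate the representation without the parent prefix where the branch
# context is already obvious, e.g. on the branches component tab:
#
# represent = org_OrganisationRepresent(parent=False, skip_dt_orderby=True)
# current.s3db.org_organisation_branch.branch_id.represent = represent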
# =============================================================================
class org_SiteRepresent(S3Represent):
""" Representation of Sites """
def __init__(self,
translate = False,
show_link = False,
multiple = False,
show_type = True,
):
self.show_type = show_type
if show_type or show_link:
# Need a custom lookup
self.lookup_rows = self.custom_lookup_rows
# Need a custom representation
fields = ["name"]
super(org_SiteRepresent,
self).__init__(lookup="org_site",
fields=fields,
show_link=show_link,
translate=translate,
multiple=multiple)
# -------------------------------------------------------------------------
def bulk(self, values, rows=None, list_type=False, show_link=True, include_blank=True):
"""
Represent multiple values as dict {value: representation}
@param values: list of values
@param rows: the referenced rows (if values are foreign keys)
@param show_link: render each representation as link
@param include_blank: Also include a blank value
@return: a dict {value: representation}
"""
show_link = show_link and self.show_link
if show_link and not rows:
# Retrieve the rows
rows = self.custom_lookup_rows(None, values)
self._setup()
# Get the values
if rows and self.table:
values = [row["org_site.site_id"] for row in rows]
else:
values = [values] if type(values) is not list else values
# Lookup the representations
if values:
labels = self._lookup(values, rows=rows)
if show_link:
link = self.link
rows = self.rows
labels = dict((k, link(k, v, rows.get(k)))
for k, v in labels.items())
for v in values:
if v not in labels:
labels[v] = self.default
else:
labels = {}
if include_blank:
labels[None] = self.none
return labels
# -------------------------------------------------------------------------
def custom_lookup_rows(self, key, values, fields=[]):
"""
Custom lookup method for site rows, does a
left join with any instance_types found. Parameters
key and fields are not used, but are kept for API
compatibility reasons.
@param values: the site IDs
"""
db = current.db
s3db = current.s3db
stable = s3db.org_site
qty = len(values)
if qty == 1:
query = (stable.id == values[0])
limitby = (0, 1)
else:
query = (stable.id.belongs(values))
limitby = (0, qty)
if self.show_link:
# We need the instance_type IDs
# Do a first query to see which instance_types we have
rows = db(query).select(stable.instance_type,
limitby=limitby)
instance_types = []
for row in rows:
if row.instance_type not in instance_types:
instance_types.append(row.instance_type)
# Now do a second query which left-joins with all the instance tables we have
fields = [stable.site_id,
stable.instance_type,
stable.name,
]
left = []
for instance_type in instance_types:
table = s3db[instance_type]
fields.append(table.id)
left.append(table.on(table.site_id == stable.site_id))
if instance_type == "org_facility":
# We also need the Facility Types
ltable = db.org_site_facility_type
ttable = db.org_facility_type
fields.append(ttable.name)
left.append(ltable.on(ltable.site_id == stable.site_id))
left.append(ttable.on(ttable.id == ltable.facility_type_id))
rows = db(query).select(*fields,
left=left,
limitby=limitby)
else:
# We don't need instance_type IDs
# Just do a join with org_facility_type
ttable = s3db.org_facility_type
ltable = db.org_site_facility_type
left = [ltable.on(ltable.site_id == stable.site_id),
ttable.on(ttable.id == ltable.facility_type_id)]
rows = db(query).select(stable.site_id,
stable.instance_type,
stable.name,
ttable.name,
left=left,
limitby=limitby)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
@param k: the key (site_id)
@param v: the representation of the key
@param row: the row with this key
"""
if row:
try:
instance_type = row["org_site.instance_type"]
id = row[instance_type].id
except AttributeError:
return v
else:
c, f = instance_type.split("_", 1)
return A(v, _href=URL(c=c, f=f, args=[id],
# remove the .aaData extension in paginated views
extension=""
))
else:
# We have no way to determine the linkto
return v
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a single Row
@param row: the org_site Row
"""
name = row["org_site.name"]
if not name:
return self.default
if self.show_type:
instance_type = row["org_site.instance_type"]
facility_type = row.get("org_facility_type.name", None)
if facility_type:
# These need to be translated
name = "%s (%s)" % (name, current.T(facility_type))
else:
instance_type = current.auth.org_site_types.get(instance_type, None)
if instance_type:
name = "%s (%s)" % (name, instance_type)
return s3_unicode(name)
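# Illustrative usage (a sketch, not from the upstream module): represent site
# IDs with their facility/instance type and, optionally, a link to the
# instance record:
#
# represent = org_SiteRepresent(show_link=True)
# label = represent(site_id)                       # site_id is hypothetical
# labels = represent.bulk([site_id_a, site_id_b])  # {site_id: label, ...}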
# =============================================================================
def org_site_has_assets(row, tablename="org_facility"):
""" Whether a Site has Assets """
if not current.deployment_settings.has_module("asset"):
return False
if hasattr(row, tablename):
row = row[tablename]
try:
id = row.id
except AttributeError:
return None
s3db = current.s3db
atable = s3db.asset_asset
stable = s3db[tablename]
query = (atable.deleted != True) & \
(stable.id == id) & \
(atable.site_id == stable.site_id)
asset = current.db(query).select(atable.id,
limitby=(0, 1)).first()
if asset:
return True
else:
return False
# =============================================================================
def org_site_has_inv(row, tablename="org_facility"):
""" Whether a Site has Inventory """
if not current.deployment_settings.has_module("inv"):
return False
if hasattr(row, tablename):
row = row[tablename]
try:
id = row.id
except AttributeError:
return None
s3db = current.s3db
itable = s3db.inv_inv_item
stable = s3db[tablename]
query = (itable.deleted != True) & \
(stable.id == id) & \
(itable.site_id == stable.site_id) & \
(itable.quantity > 0)
inv = current.db(query).select(itable.id,
limitby=(0, 1)).first()
if inv:
return True
else:
return False
# =============================================================================
def org_site_top_req_priority(row, tablename="org_facility"):
""" Highest priority of open requests for a site """
if not current.deployment_settings.has_module("req"):
return None
try:
from req import REQ_STATUS_COMPLETE
except ImportError:
return None
if hasattr(row, tablename):
row = row[tablename]
try:
id = row.id
except AttributeError:
return None
s3db = current.s3db
rtable = s3db.req_req
stable = s3db[tablename]
query = (rtable.deleted != True) & \
(stable.id == id) & \
(rtable.site_id == stable.site_id) & \
(rtable.fulfil_status != REQ_STATUS_COMPLETE) & \
(rtable.is_template == False)
req = current.db(query).select(rtable.id,
rtable.priority,
orderby=~rtable.priority,
limitby=(0, 1)).first()
if req:
return req.priority
else:
return None
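# Illustrative usage (a sketch, not from the upstream module): besides backing
# the Field.Method("inv"/"assets"/"reqs") virtual fields on org_facility, the
# three helpers above can be called directly with a facility Row:
#
# ftable = current.s3db.org_facility
# fac = current.db(ftable.id > 0).select(ftable.id, limitby=(0, 1)).first()
# if fac:
#     print(org_site_has_inv(fac),
#           org_site_has_assets(fac),
#           org_site_top_req_priority(fac))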
# =============================================================================
def org_rheader(r, tabs=[]):
""" Organisation/Office/Facility/Group page headers """
if r.representation != "html":
# RHeaders only used in interactive views
return None
# Need to use this format as otherwise req_match?viewing=org_office.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
if record is None:
# List or Create form: rheader makes no sense here
return None
T = current.T
s3db = current.s3db
# These 2 needed for req_match
r.record = record
r.table = \
table = s3db[tablename]
settings = current.deployment_settings
if tablename == "org_organisation":
# Tabs
if not tabs:
skip_branches = False
# If a filter is being applied to the Organisations, amend the tabs accordingly
type_filter = current.request.get_vars.get("organisation_type.name",
None)
if type_filter:
if type_filter == "Supplier":
skip_branches = True
tabs = [(T("Basic Details"), None),
(T("Offices"), "office"),
(T("Warehouses"), "warehouse"),
(T("Contacts"), "human_resource"),
]
elif type_filter == "Academic,Bilateral,Government,Intergovernmental,NGO,UN agency":
tabs = [(T("Basic Details"), None, {"native": 1}),
(T("Offices"), "office"),
(T("Warehouses"), "warehouse"),
(T("Contacts"), "human_resource"),
(T("Projects"), "project"),
]
else:
tabs = [(T("Basic Details"), None),
(T("Offices"), "office"),
(T("Warehouses"), "warehouse"),
(T("Facilities"), "facility"),
(T("Staff & Volunteers"), "human_resource"),
(T("Assets"), "asset"),
(T("Projects"), "project"),
(T("User Roles"), "roles"),
#(T("Tasks"), "task"),
]
if settings.get_org_resources_tab():
tabs.insert(-1, (T("Resources"), "resource"))
if settings.get_L10n_translate_org_organisation():
tabs.insert(1, (T("Local Names"), "name"))
# Use branches?
if settings.get_org_branches() and not skip_branches:
if settings.get_org_branches_tree_view():
presentation = "hierarchy"
else:
presentation = "branch"
tabs.insert(1, (T("Branches"), presentation))
rheader_tabs = s3_rheader_tabs(r, tabs)
# @ToDo: Update for Component
# if record.sector_id:
# if settings.get_ui_label_cluster():
# sector_label = T("Cluster(s)")
# else:
# sector_label = T("Sector(s)")
# sectors = TR(TH("%s: " % sector_label),
# table.sector_id.represent(record.sector_id))
# else:
# sectors = ""
if record.website:
website = TR(TH("%s: " % table.website.label),
A(record.website, _href=record.website))
else:
website = ""
if record.root_organisation != record.id:
btable = s3db.org_organisation_branch
query = (btable.branch_id == record.id) & \
(btable.organisation_id == table.id)
parent = current.db(query).select(table.id,
table.name,
limitby=(0, 1)
).first()
if parent:
parent = TR(TH("%s: " % T("Branch of")),
A(parent.name, _href=URL(args=[parent.id, "read"])))
else:
parent = ""
else:
parent = ""
rheader = DIV()
logo = org_organisation_logo(record)
rData = TABLE(TR(TH("%s: " % table.name.label),
record.name,
),
parent,
website,
#sectors,
)
if logo:
rheader.append(TABLE(TR(TD(logo), TD(rData))))
else:
rheader.append(rData)
rheader.append(rheader_tabs)
elif tablename in ("org_office", "org_facility"):
tabs = [(T("Basic Details"), None),
#(T("Contact Data"), "contact"),
]
append = tabs.append
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
permit = current.auth.s3_has_permission
if permit("update", tablename, r.id) and \
permit("create", "hrm_human_resource_site"):
tabs.append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
if settings.get_req_summary():
append((T("Needs"), "site_needs"))
if settings.has_module("asset"):
append((T("Assets"), "asset"))
if settings.has_module("inv"):
tabs = tabs + s3db.inv_tabs(r)
if settings.has_module("req"):
tabs = tabs + s3db.req_tabs(r)
tabs.extend(((T("Attachments"), "document"),
(T("User Roles"), "roles"),
))
if tablename == "org_office":
rheader_fields = [["name", "organisation_id", "email"],
["office_type_id", "location_id", "phone1"],
]
else:
def facility_type_lookup(record):
db = current.db
ltable = db.org_site_facility_type
ttable = db.org_facility_type
query = (ltable.site_id == record.site_id) & \
(ltable.facility_type_id == ttable.id)
rows = db(query).select(ttable.name)
if rows:
return ", ".join([row.name for row in rows])
else:
return current.messages["NONE"]
rheader_fields = [["name", "organisation_id", "email"],
[(T("Facility Type"), facility_type_lookup),
"location_id", "phone1"],
]
rheader_fields, rheader_tabs = S3ResourceHeader(rheader_fields,
tabs)(r, as_div=True)
# Inject logo
logo = org_organisation_logo(record.organisation_id)
if logo:
rheader = DIV(TABLE(TR(TD(logo),
TD(rheader_fields))))
else:
rheader = DIV(rheader_fields)
rheader.append(rheader_tabs)
if settings.has_module("inv"):
# Build footer
s3db.inv_rfooter(r, record)
elif tablename == "org_group":
tabs = [(T("Basic Details"), None),
(T("Member Organizations"), "organisation"),
(T("Groups"), "pr_group"),
(T("Documents"), "document"),
]
if current.auth.s3_has_permission("create", "org_group_membership"):
tabs.insert(2, (T("Add Organization"), "assign"))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(
TH("%s: " % table.name.label),
record.name,
)),
rheader_tabs)
elif tablename in ("org_organisation_type", "org_office_type"):
tabs = [(T("Basic Details"), None),
(T("Tags"), "tag"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(
TH("%s: " % table.name.label),
record.name,
)),
rheader_tabs)
return rheader
# =============================================================================
def org_organisation_controller():
"""
Organisation Controller, defined in the model for use from
multiple controllers for unified menus
"""
T = current.T
db = current.db
s3db = current.s3db
s3 = current.response.s3
settings = current.deployment_settings
# Pre-process
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.representation == "json":
r.table.pe_id.readable = True
list_fields = s3db.get_config(r.tablename,
"list_fields") or []
s3db.configure(r.tablename, list_fields=list_fields + ["pe_id"])
elif r.interactive or r.representation == "aadata":
gis = current.gis
r.table.country.default = gis.get_default_country("code")
method = r.method
use_branches = settings.get_org_branches()
if use_branches and not r.component and not r.record:
# Filter out branches from multi-record views
branch_filter = (FS("parent.id") == None)
# Filter Locations
lfilter = current.session.s3.location_filter
if lfilter:
# Include those whose parent is in a different country
gtable = s3db.gis_location
query = (gtable.id == lfilter)
row = db(query).select(gtable.id,
gtable.name,
gtable.level,
gtable.path,
limitby=(0, 1)).first()
if row and row.level:
if row.level != "L0":
code = gis.get_parent_country(row, key_type="code")
else:
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == row.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
code = tag.value
branch_filter |= (FS("parent.country") != code) | \
(FS("parent.country") == None)
r.resource.add_filter(branch_filter)
if not r.component or r.component_name == "branch":
type_filter = r.get_vars.get("organisation_type.name", None)
if type_filter:
type_names = [name.lower().strip()
for name in type_filter.split(",")]
field = s3db.org_organisation_organisation_type.organisation_type_id
field.comment = None # Don't want to create new types here
if len(type_names) == 1:
# Strip Type from list_fields
list_fields = s3db.get_config("org_organisation",
"list_fields")
try:
list_fields.remove("organisation_organisation_type.organisation_type_id")
except ValueError:
# Already removed
pass
if not method or method == "create":
# Default the Type
type_table = s3db.org_organisation_type
query = (type_table.name == type_filter)
row = db(query).select(type_table.id,
limitby=(0, 1)).first()
type_id = row and row.id
if type_id:
field.default = type_id
field.writable = False
crud_form = s3db.get_config("org_organisation",
"crud_form")
for e in crud_form.elements:
if e.selector == "organisation_type":
e.options.label = ""
elif not method or method in ("create", "update"):
# Limit the Type
type_table = s3db.org_organisation_type
fquery = (type_table.name.lower().belongs(type_names))
field.requires = IS_ONE_OF(db(fquery),
"org_organisation_type.id",
label=field.represent,
error_message=T("Please choose a type"),
sort=True)
if r.component:
cname = r.component_name
if cname == "human_resource" and r.component_id:
# Workaround until widget is fixed:
htable = s3db.hrm_human_resource
htable.person_id.widget = None
htable.person_id.writable = False
elif cname == "branch":
# Branches default to the same type/country as the parent
otable = r.table
record = r.record
otable.region_id.default = record.region_id
otable.country.default = record.country
ottable = s3db.org_organisation_organisation_type
row = db(ottable.organisation_id == record.id).select(ottable.organisation_type_id,
limitby=(0, 1),
).first()
if row:
ottable.organisation_type_id.default = row.organisation_type_id
ostable = s3db.org_sector_organisation
row = db(ostable.organisation_id == record.id).select(ostable.sector_id,
limitby=(0, 1),
).first()
if row:
ostable.sector_id.default = row.sector_id
# Represent orgs without the parent prefix as we have that context already
branch_represent = org_OrganisationRepresent(parent=False,
skip_dt_orderby=True,
)
s3db.org_organisation_branch.branch_id.represent = branch_represent
elif cname == "task" and \
method != "update" and method != "read":
# Create or ListCreate
ttable = r.component.table
ttable.organisation_id.default = r.id
ttable.status.writable = False
ttable.status.readable = False
elif cname == "asset":
# Filter the Site field
field = s3db.super_link("site_id", "org_site",
empty = False,
filterby="organisation_id",
filter_opts=(r.id,),
represent = s3db.org_site_represent,
)
atable = s3db.asset_asset
atable.site_id.requires = field.requires
# Stay within Organisation tab
s3db.configure("asset_asset",
create_next = None,
)
elif cname == "project" and r.link:
# Hide/show host role after project selection in embed-widget
tn = r.link.tablename
s3db.configure(tn,
post_process='''S3.hide_host_role($('#%s').val())''')
s3.scripts.append("/%s/static/scripts/S3/s3.hide_host_role.js" % \
r.application)
s3db.configure("project_project",
create_next = None,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
cname = r.component_name
if cname == "human_resource":
# Modify action button to open staff instead of human_resource
# (Delete not overridden to keep errors within Tab)
read_url = URL(c="hrm", f="staff", args=["[id]"])
update_url = URL(c="hrm", f="staff", args=["[id]", "update"])
S3CRUD.action_buttons(r, read_url=read_url,
update_url=update_url)
return output
s3.postp = postp
output = current.rest_controller("org", "organisation",
# Need to be explicit since can also come from Project controller
csv_stylesheet = ("org", "organisation.xsl"),
csv_template = ("org", "organisation"),
# Don't allow components with components (such as document) to breakout from tabs
native = False,
rheader = org_rheader,
)
return output
# =============================================================================
def org_site_staff_config(r):
"""
Configure the Staff tab for Sites
"""
table = current.s3db.hrm_human_resource
settings = current.deployment_settings
if settings.has_module("vol"):
if settings.get_hrm_show_staff():
if settings.get_org_site_volunteers():
# Show the type field
field = table.type
field.label = current.T("Type")
field.readable = field.writable = True
#else:
# # Filter to just Staff
# r.resource.add_filter(FS("human_resource.type") == 1)
elif settings.get_org_site_volunteers():
# Default to Volunteers
table.type.default = 2
# Cascade the organisation_id from the site to the staff
field = table.organisation_id
field.default = r.record.organisation_id
field.writable = False
field.comment = None
# Filter out people who are already staff for this office
# - this only works for an IS_ONE_OF dropdown
# - @ToDo: Pass a flag to pr_search_ac via S3AddPersonWidget2 to do the same thing
#site_id = record.site_id
#try:
# person_id_field = r.target()[2].person_id
#except:
# pass
#else:
# query = (htable.site_id == site_id) & \
# (htable.deleted == False)
# staff = current.db(query).select(htable.person_id)
# person_ids = [row.person_id for row in staff]
# try:
# person_id_field.requires.set_filter(not_filterby = "id",
# not_filter_opts = person_ids)
# except:
# pass
# =============================================================================
def org_office_controller():
"""
Office Controller, defined in the model for use from
multiple controllers for unified menus
"""
T = current.T
s3db = current.s3db
request = current.request
s3 = current.response.s3
settings = current.deployment_settings
# Get default organisation_id
req_vars = request.vars
organisation_id = req_vars["organisation_id"]
if type(organisation_id) is list:
req_vars["organisation_id"] = organisation_id[0]
organisation_id = req_vars["organisation_id"] or \
current.session.s3.organisation_id or \
""
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
table = r.table
if organisation_id:
table.organisation_id.default = organisation_id
if r.representation == "plain":
# Map popups want less clutter
table.obsolete.readable = False
if r.interactive:
if r.component:
cname = r.component_name
if cname in ("inv_item", "recv", "send"):
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# Remove CRUD generated buttons in the tabs
s3db.configure("inv_inv_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
elif cname == "human_resource":
org_site_staff_config(r)
elif cname == "req" and r.method not in ("update", "read"):
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif cname == "asset":
# Default/Hide the Organisation & Site fields
record = r.record
atable = s3db.asset_asset
field = atable.organisation_id
field.default = record.organisation_id
field.readable = field.writable = False
field = atable.site_id
field.default = record.site_id
field.readable = field.writable = False
# Stay within Office tab
s3db.configure("asset_asset",
create_next = None)
elif r.method in ("create", "update"):
if r.method == "update":
table.obsolete.readable = table.obsolete.writable = True
# Context from a Profile page?
org_id = request.get_vars.get("(organisation)", None)
if org_id:
field = table.organisation_id
field.default = org_id
field.readable = field.writable = False
elif r.id:
table.obsolete.readable = table.obsolete.writable = True
elif r.representation == "geojson":
marker_fn = s3db.get_config("org_office", "marker_fn")
if marker_fn:
# Load these models now as they'll be needed when we encode
mtable = s3db.gis_marker
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
if not r.component and \
settings.get_org_summary():
# Insert fields to view/record the summary data
# @ToDo: Re-implement using http://eden.sahanafoundation.org/wiki/S3SQLForm
table = s3db.org_office_summary
field1 = table.national_staff
field2 = table.international_staff
row = None
if r.id:
query = (table.office_id == r.id)
row = current.db(query).select(field1,
field2,
limitby=(0, 1)).first()
s3_formstyle = settings.get_ui_formstyle()
if r.method == "read" and \
"item" in output:
for field in [field1, field2]:
if row:
widget = row[field]
else:
widget = current.messages["NONE"]
field_id = "%s_%s" % (table._tablename, field.name)
label = field.label
label = LABEL(label, _for=field_id,
_id=field_id + SQLFORM.ID_LABEL_SUFFIX)
row_id = field_id + SQLFORM.ID_ROW_SUFFIX
comment = ""
rows = s3_formstyle(row_id, label, widget, comment)
try:
# Insert Label row
output["item"][0].insert(-2, rows[0])
except:
pass
try:
# Insert Widget row
output["item"][0].insert(-2, rows[1])
except:
# A non-standard formstyle with just a single row
pass
elif r.method not in ("import", "map") and \
"form" in output:
sep = ": "
for field in [field1, field2]:
if row:
default = row[field]
else:
default = field.default
widget = field.widget or SQLFORM.widgets.integer.widget(field, default)
field_id = "%s_%s" % (table._tablename, field.name)
label = field.label
label = LABEL(label, label and sep, _for=field_id,
_id=field_id + SQLFORM.ID_LABEL_SUFFIX)
comment = field.comment or ""
row_id = field_id + SQLFORM.ID_ROW_SUFFIX
rows = s3_formstyle(row_id, label, widget, comment)
try:
# Insert Label row
output["form"][0].insert(-4, rows[0])
except:
pass
try:
# Insert Widget row
output["form"][0].insert(-4, rows[1])
except:
# A non-standard formstyle with just a single row
pass
else:
cname = r.component_name
if cname == "human_resource":
# Modify action button to open staff instead of human_resource
# (Delete not overridden to keep errors within Tab)
read_url = URL(c="hrm", f="staff", args=["[id]"])
update_url = URL(c="hrm", f="staff", args=["[id]", "update"])
S3CRUD.action_buttons(r, read_url=read_url,
update_url=update_url)
return output
s3.postp = postp
output = current.rest_controller("org", "office",
# Don't allow components with components (such as document) to breakout from tabs
native = False,
rheader = org_rheader,
)
return output
# =============================================================================
def org_facility_controller():
"""
Facility Controller, defined in the model for use from
multiple controllers for unified menus
"""
s3db = current.s3db
s3 = current.response.s3
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive:
if r.component:
cname = r.component_name
if cname in ("inv_item", "recv", "send"):
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# remove CRUD-generated buttons in the tabs
s3db.configure("inv_inv_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
elif cname == "human_resource":
org_site_staff_config(r)
elif cname == "req" and r.method not in ("update", "read"):
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif cname == "asset":
# Default/Hide the Organisation & Site fields
record = r.record
atable = s3db.asset_asset
field = atable.organisation_id
field.default = record.organisation_id
field.readable = field.writable = False
field = atable.site_id
field.default = record.site_id
field.readable = field.writable = False
# Stay within Facility tab
s3db.configure("asset_asset",
create_next = None)
elif r.id:
table = r.table
field = table.obsolete
field.readable = field.writable = True
if r.method == "update" and \
r.representation == "popup" and \
r.get_vars.get("profile") == "org_organisation":
# Coming from organisation profile
# Don't allow change of organisation_id in this case
field = table.organisation_id
field.writable = False
field.readable = False
elif r.method == "create":
table = r.table
get_vars = r.get_vars
name = get_vars.get("name")
if name:
table.name.default = name
if r.representation == "popup" and \
get_vars.get("profile") == "org_organisation":
# Coming from organisation profile
organisation_id = None
for k in ("~.organisation_id", "(organisation)", "~.(organisation)"):
if k in get_vars:
organisation_id = get_vars[k]
break
if organisation_id is not None:
# Don't allow change of organisation_id in this case
field = table.organisation_id
field.default = organisation_id
field.writable = False
field.readable = False
elif r.representation == "geojson":
# Load these models now as they'll be needed when we encode
mtable = s3db.gis_marker
return True
s3.prep = prep
def postp(r, output):
if r.representation == "plain":
# Custom Map Popup
T = current.T
output = TABLE()
append = output.append
record = r.record
# Edit button
append(TR(TD(A(T("Edit"),
_target="_blank",
_id="edit-btn",
_href=URL(args=[r.id, "update"])))))
# Name
append(TR(TD(B("%s:" % T("Name"))),
TD(record.name)))
site_id = record.site_id
# Type(s)
db = current.db
ttable = db.org_facility_type
ltable = db.org_site_facility_type
query = (ltable.site_id == site_id) & \
(ltable.facility_type_id == ttable.id)
rows = db(query).select(ttable.name)
if rows:
append(TR(TD(B("%s:" % ltable.facility_type_id.label)),
TD(", ".join([row.name for row in rows]))))
ftable = r.table
# Comments
if record.comments:
append(TR(TD(B("%s:" % ftable.comments.label)),
TD(ftable.comments.represent(record.comments))))
# Organisation (better with just name rather than Represent)
# @ToDo: Make this configurable - some users will only see
# their staff so this is a meaningless field for them
table = db.org_organisation
org = db(table.id == record.organisation_id).select(table.name,
limitby=(0, 1)
).first()
if org:
append(TR(TD(B("%s:" % ftable.organisation_id.label)),
TD(org.name)))
if current.deployment_settings.has_module("req"):
# Open High/Medium priority Requests
rtable = s3db.req_req
query = (rtable.site_id == site_id) & \
(rtable.fulfil_status != 2) & \
(rtable.priority.belongs((2, 3)))
reqs = db(query).select(rtable.id,
rtable.req_ref,
rtable.type,
)
if reqs:
append(TR(TD(B("%s:" % T("Requests")))))
req_types = {1: "req_item",
3: "req_skill",
8: "",
9: "",
}
vals = [A(req.req_ref,
_href=URL(c="req", f="req",
args=[req.id, req_types[req.type]])) for req in reqs]
for val in vals:
append(TR(TD(val, _colspan=2)))
# Street address
gtable = s3db.gis_location
stable = s3db.org_site
query = (gtable.id == stable.location_id) & \
(stable.id == site_id)
location = db(query).select(gtable.addr_street,
limitby=(0, 1)).first()
if location.addr_street:
append(TR(TD(B("%s:" % gtable.addr_street.label)),
TD(location.addr_street)))
# Opening Times
opens = record.opening_times
if opens:
append(TR(TD(B("%s:" % ftable.opening_times.label)),
TD(opens)))
# Contact
contact = record.contact
if contact:
append(TR(TD(B("%s:" % ftable.contact.label)),
TD(contact)))
# Phone number
phone1 = record.phone1
if phone1:
append(TR(TD(B("%s:" % ftable.phone1.label)),
TD(phone1)))
# Email address (as hyperlink)
email = record.email
if email:
append(TR(TD(B("%s:" % ftable.email.label)),
TD(A(email, _href="mailto:%s" % email))))
# Website (as hyperlink)
website = record.website
if website:
append(TR(TD(B("%s:" % ftable.website.label)),
TD(A(website, _href=website))))
return output
s3.postp = postp
output = current.rest_controller("org", "facility",
rheader = org_rheader,
)
return output
# =============================================================================
# Hierarchy Manipulation
# =============================================================================
#
def org_update_affiliations(table, record):
"""
Update OU affiliations related to this record
@param table: the table
@param record: the record
"""
if hasattr(table, "_tablename"):
rtype = table._tablename
else:
rtype = table
if rtype == "org_organisation_branch":
ltable = current.s3db.org_organisation_branch
if not isinstance(record, Row):
record = current.db(ltable.id == record).select(ltable.ALL,
limitby=(0, 1)
).first()
if not record:
return
organisation_update_affiliations(record)
elif rtype == "org_group_membership":
mtable = current.s3db.org_group_membership
if not isinstance(record, Row):
record = current.db(mtable.id == record).select(mtable.ALL,
limitby=(0, 1)
).first()
if not record:
return
org_group_update_affiliations(record)
elif rtype == "org_site" or rtype in current.auth.org_site_types:
if "organisation_id" not in record:
# Probably created on component tab, so form does not have the
# organisation_id => reload record to get it
rtable = current.s3db[rtype]
try:
query = (rtable._id == record[rtable._id.name])
except (KeyError, AttributeError):
return
record = current.db(query).select(rtable.ALL,
limitby=(0, 1)).first()
org_site_update_affiliations(record)
# =============================================================================
def organisation_update_affiliations(record):
"""
Update affiliations for a branch organisation
@param record: the org_organisation_branch record
"""
if record.deleted and record.deleted_fk:
try:
fk = json.loads(record.deleted_fk)
branch_id = fk["branch_id"]
except:
return
else:
branch_id = record.branch_id
from pr import OU
BRANCHES = "Branches"
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
btable = otable.with_alias("branch")
ltable = db.org_organisation_branch
etable = s3db.pr_pentity
rtable = db.pr_role
atable = db.pr_affiliation
o = otable._tablename
b = btable._tablename
r = rtable._tablename
# Get current memberships
query = (ltable.branch_id == branch_id) & \
(ltable.deleted != True)
left = [otable.on(ltable.organisation_id == otable.id),
btable.on(ltable.branch_id == btable.id)]
rows = db(query).select(otable.pe_id, btable.pe_id, left=left)
current_memberships = [(row[o].pe_id, row[b].pe_id) for row in rows]
# Get current affiliations
query = (rtable.deleted != True) & \
(rtable.role == BRANCHES) & \
(rtable.pe_id == etable.pe_id) & \
(etable.instance_type == o) & \
(atable.deleted != True) & \
(atable.role_id == rtable.id) & \
(atable.pe_id == btable.pe_id) & \
(btable.id == branch_id)
rows = db(query).select(rtable.pe_id, btable.pe_id)
current_affiliations = [(row[r].pe_id, row[b].pe_id) for row in rows]
# Remove all affiliations which are not current memberships
remove_affiliation = s3db.pr_remove_affiliation
for a in current_affiliations:
org, branch = a
if a not in current_memberships:
remove_affiliation(org, branch, role=BRANCHES)
else:
current_memberships.remove(a)
# Add affiliations for all new memberships
add_affiliation = s3db.pr_add_affiliation
for m in current_memberships:
org, branch = m
add_affiliation(org, branch, role=BRANCHES, role_type=OU)
# =============================================================================
def org_group_update_affiliations(record):
"""
Update affiliations for organisation group memberships
@param record: the org_group_membership record
"""
if record.deleted and record.deleted_fk:
try:
fk = json.loads(record.deleted_fk)
organisation_id = fk["organisation_id"]
except:
return
else:
organisation_id = record.organisation_id
MEMBER = 2 # role_type == "Member"
MEMBERS = "Members"
db = current.db
s3db = current.s3db
mtable = s3db.org_group_membership
otable = db.org_organisation
gtable = db.org_group
etable = s3db.pr_pentity
rtable = db.pr_role
atable = db.pr_affiliation
g = gtable._tablename
r = rtable._tablename
o = otable._tablename
# Get current memberships
query = (mtable.organisation_id == organisation_id) & \
(mtable.deleted != True)
left = [otable.on(mtable.organisation_id == otable.id),
gtable.on(mtable.group_id == gtable.id)]
rows = db(query).select(otable.pe_id, gtable.pe_id, left=left)
current_memberships = [(row[g].pe_id, row[o].pe_id) for row in rows]
# Get current affiliations
query = (rtable.deleted != True) & \
(rtable.role == MEMBERS) & \
(rtable.pe_id == etable.pe_id) & \
(etable.instance_type == g) & \
(atable.deleted != True) & \
(atable.role_id == rtable.id) & \
(atable.pe_id == otable.pe_id) & \
(otable.id == organisation_id)
rows = db(query).select(otable.pe_id, rtable.pe_id)
current_affiliations = [(row[r].pe_id, row[o].pe_id) for row in rows]
# Remove all affiliations which are not current memberships
remove_affiliation = s3db.pr_remove_affiliation
for a in current_affiliations:
group, org = a
if a not in current_memberships:
remove_affiliation(group, org, role=MEMBERS)
else:
current_memberships.remove(a)
# Add affiliations for all new memberships
add_affiliation = s3db.pr_add_affiliation
for m in current_memberships:
group, org = m
add_affiliation(group, org, role=MEMBERS, role_type=MEMBER)
# =============================================================================
def org_site_update_affiliations(record):
"""
Update the affiliations of an org_site instance
@param record: the org_site instance record
"""
from pr import OU
SITES = "Sites"
db = current.db
s3db = current.s3db
stable = s3db.org_site
otable = db.org_organisation
ptable = s3db.pr_pentity
rtable = db.pr_role
atable = db.pr_affiliation
o_pe_id = None
s_pe_id = record.pe_id
organisation_id = record.organisation_id
if organisation_id:
org = db(otable.id == organisation_id).select(otable.pe_id,
limitby=(0, 1)).first()
if org:
o_pe_id = org.pe_id
if s_pe_id:
query = (atable.deleted != True) & \
(atable.pe_id == s_pe_id) & \
(rtable.deleted != True) & \
(rtable.id == atable.role_id) & \
(rtable.role == SITES) & \
(ptable.pe_id == rtable.pe_id) & \
(ptable.instance_type == str(otable))
rows = db(query).select(rtable.pe_id)
seen = False
remove_affiliation = s3db.pr_remove_affiliation
for row in rows:
if o_pe_id is None or o_pe_id != row.pe_id:
remove_affiliation(row.pe_id, s_pe_id, role=SITES)
elif o_pe_id == row.pe_id:
seen = True
if o_pe_id and not seen:
s3db.pr_add_affiliation(o_pe_id, s_pe_id, role=SITES,
role_type=OU)
# =============================================================================
def org_update_root_organisation(organisation_id, root_org=None):
"""
Update the root organisation of an org_organisation
@param organisation_id: the org_organisation record ID
@param root_org: the root organisation record ID (for
internal use in update cascade only)
@return: the root organisation ID
"""
# @todo: make immune against circular references!
db = current.db
s3db = current.s3db
otable = s3db.org_organisation
ltable = s3db.org_organisation_branch
if root_org is None:
# Batch update (introspective)
if isinstance(organisation_id, (list, tuple, set)):
for organisation in organisation_id:
org_update_root_organisation(organisation)
return None
# Get the parent organisation
query = (ltable.branch_id == organisation_id) & \
(ltable.organisation_id == otable.id)
parent_org = db(query).select(otable.id,
otable.root_organisation,
limitby=(0, 1)).first()
if not parent_org:
# No parent organisation? => this is the root organisation
root_org = organisation_id
else:
# Use parent organisation's root_organisation
root_org = parent_org.root_organisation
if not root_org:
# Not present? => update it
root_org = org_update_root_organisation(parent_org.id)
if root_org is not None:
# Update the record(s)
if isinstance(organisation_id, (list, tuple, set)):
oquery = (otable.id.belongs(organisation_id))
bquery = (ltable.organisation_id.belongs(organisation_id))
else:
oquery = (otable.id == organisation_id)
bquery = (ltable.organisation_id == organisation_id)
db(oquery).update(root_organisation=root_org)
# Propagate to all branches (explicit batch update)
branches = db(bquery).select(ltable.branch_id)
if branches:
branch_ids = set(branch.branch_id for branch in branches)
org_update_root_organisation(branch_ids, root_org=root_org)
return root_org
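# Usage sketch (illustrative, not part of the original module): this is typically
# invoked when organisation branch links are created or updated, e.g.
#
#   org_update_root_organisation(branch_id)        # single organisation
#   org_update_root_organisation({4, 7, 11})       # batch (introspective) update
#
# A single call returns the root organisation ID; a batch call returns None.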
# =============================================================================
class org_AssignMethod(S3Method):
"""
Custom Method to allow organisations to be assigned to something
e.g. Organisation Group
"""
def __init__(self, component, types=None):
"""
@param component: the Component in which to create records
"""
self.component = component
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
component = self.component
components = r.resource.components
for c in components:
if c == component:
component = components[c]
break
try:
if component.link:
component = component.link
except:
current.log.error("Invalid Component!")
raise
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
T = current.T
s3db = current.s3db
get_vars = r.get_vars
response = current.response
if r.http == "POST":
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = r.record[fkey]
else:
record_id = r.id
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
db = current.db
table = s3db[tablename]
if selected:
# Handle exclusion filter
if post_vars.mode == "Exclusive":
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected))
hresource = s3db.resource("org_organisation",
filter=query, vars=filters)
rows = hresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
query = (table.organisation_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept",
None)
)
for organisation_id in selected:
try:
org_id = int(organisation_id.strip())
except ValueError:
continue
if org_id not in rows:
link = Storage(organisation_id = organisation_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars=link)
onaccept(form)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
dict(number=added)
if added > 0:
redirect(URL(args=[r.id, "organisation"], vars={}))
else:
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets
filter_widgets = []
# List fields
list_fields = ["id",
"name",
]
# Data table
resource = s3db.resource("org_organisation")
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
resource.add_filter(filter)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Add"), "assign")]
if r.representation == "html":
# Page load
resource.configure(deletable = False)
profile_url = URL(c = "org",
f = "organisation",
args = ["[id]", "profile"])
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url)
response.s3.no_formats = True
# Data table (items)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url=URL(args = r.args,
extension="aadata",
vars={},
),
dt_bulk_actions=dt_bulk_actions,
dt_pageLength=display_length,
dt_pagination="true",
dt_searching="false",
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="human_resource",
args=["filter.options"],
vars={})
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear=filter_clear,
formstyle=filter_formstyle,
submit=filter_submit,
ajax=True,
url=filter_submit_url,
ajaxurl=filter_ajax_url,
_class="filter-form",
_id="datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = resource.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target="datatable",
alias=alias)
else:
ff = ""
output = dict(items = items,
title = T("Add Organization"),
list_filter_form = ff)
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(501, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
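# Usage sketch (illustrative, not part of the original module): the method is
# typically registered against a controller, e.g.
#
#   current.s3db.set_method("org", "group",
#                           method = "assign",
#                           action = org_AssignMethod(component = "organisation"))
#
# which exposes an /assign endpoint for bulk-assigning organisations to a group.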
# =============================================================================
def org_customise_org_resource_fields(method):
"""
Customize org_resource fields for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.org_resource
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["organisation_id",
"location_id",
"parameter_id",
"value",
"comments",
]
if method in ("datalist", "profile"):
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = lambda dt: \
S3DateTime.datetime_represent(dt, utc=True)
list_fields += ["modified_by",
"modified_on",
"organisation_id$logo",
]
s3db.configure("org_resource",
list_fields = list_fields,
)
# =============================================================================
def org_organisation_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Organisations on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_organisation.id"]
item_class = "thumbnail" # span6 for 2 cols
raw = record._row
name = record["org_organisation.name"]
logo = raw["org_organisation.logo"]
phone = raw["org_organisation.phone"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
db = current.db
permit = current.auth.s3_has_permission
table = db.org_organisation
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal dl-item-edit",
_title=current.response.s3.crud_strings.org_organisation.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
item = DIV(DIV(logo,
DIV(SPAN(A(name,
_href=org_url,
_class="media-heading"
),
),
edit_bar,
_class="card-header-select",
),
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def org_resource_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Resources on Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_resource.id"]
item_class = "thumbnail"
raw = record._row
author = record["org_resource.modified_by"]
date = record["org_resource.modified_on"]
quantity = record["org_resource.value"]
resource_type = record["org_resource.parameter_id"]
comments = raw["org_resource.comments"]
organisation = record["org_resource.organisation_id"]
organisation_id = raw["org_resource.organisation_id"]
location = record["org_resource.location_id"]
location_id = raw["org_resource.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
logo = raw["org_organisation.logo"]
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
# @ToDo: use a dummy logo image
logo = A(IMG(_class="media-object"),
_href=org_url,
_class="pull-left",
)
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_resource
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
elif f == "location" and location_id:
vars["(location)"] = location_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="resource",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_resource.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
avatar = logo
item = DIV(DIV(SPAN("%s %s" % (quantity, current.T(resource_type)), _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(comments,
DIV(author or "" ,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# END =========================================================================
|
the-stack_106_13906
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AssetResult import AssetResult
class AntMerchantExpandAssetproduceCompleteSyncResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandAssetproduceCompleteSyncResponse, self).__init__()
self._asset_results = None
@property
def asset_results(self):
return self._asset_results
@asset_results.setter
def asset_results(self, value):
if isinstance(value, list):
self._asset_results = list()
for i in value:
if isinstance(i, AssetResult):
self._asset_results.append(i)
else:
self._asset_results.append(AssetResult.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AntMerchantExpandAssetproduceCompleteSyncResponse, self).parse_response_content(response_content)
if 'asset_results' in response:
self.asset_results = response['asset_results']
|
the-stack_106_13908
|
# -*- coding: utf-8 -*-
# File: backbone.py
import numpy as np
import tensorflow as tf
from contextlib import ExitStack, contextmanager
from tensorpack.models import BatchNorm, Conv2D, MaxPooling, layer_register
from tensorpack.tfutils import argscope
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.tfutils.varreplace import custom_getter_scope, freeze_variables
from mot.object_detection.config import config as cfg
@layer_register(log_shape=True)
def GroupNorm(x, group=32, gamma_initializer=tf.constant_initializer(1.)):
"""
More code that reproduces the paper can be found at https://github.com/ppwwyyxx/GroupNorm-reproduce/.
"""
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims == 4, shape
chan = shape[1]
assert chan % group == 0, chan
group_size = chan // group
orig_shape = tf.shape(x)
h, w = orig_shape[2], orig_shape[3]
x = tf.reshape(x, tf.stack([-1, group, group_size, h, w]))
mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
new_shape = [1, group, group_size, 1, 1]
beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
gamma = tf.get_variable('gamma', [chan], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
out = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5, name='output')
return tf.reshape(out, orig_shape, name='output')
def freeze_affine_getter(getter, *args, **kwargs):
# custom getter to freeze affine params inside bn
name = args[0] if len(args) else kwargs.get('name')
if name.endswith('/gamma') or name.endswith('/beta'):
kwargs['trainable'] = False
ret = getter(*args, **kwargs)
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, ret)
else:
ret = getter(*args, **kwargs)
return ret
def maybe_reverse_pad(topleft, bottomright):
if cfg.BACKBONE.TF_PAD_MODE:
return [topleft, bottomright]
return [bottomright, topleft]
@contextmanager
def backbone_scope(freeze):
"""
Args:
freeze (bool): whether to freeze all the variables under the scope
"""
def nonlin(x):
x = get_norm()(x)
return tf.nn.relu(x)
with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
argscope(Conv2D, use_bias=False, activation=nonlin,
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out')), \
ExitStack() as stack:
if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
stack.enter_context(argscope(BatchNorm, training=False))
else:
stack.enter_context(argscope(
BatchNorm, sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
if freeze:
stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
else:
# the layers are not completely frozen, but we may want to freeze only the affine params
if cfg.BACKBONE.FREEZE_AFFINE:
stack.enter_context(custom_getter_scope(freeze_affine_getter))
yield
def image_preprocess(image, bgr=True):
with tf.name_scope('image_preprocess'):
if image.dtype.base_dtype != tf.float32:
image = tf.cast(image, tf.float32)
mean = cfg.PREPROC.PIXEL_MEAN
std = np.asarray(cfg.PREPROC.PIXEL_STD)
if bgr:
mean = mean[::-1]
std = std[::-1]
image_mean = tf.constant(mean, dtype=tf.float32)
image_invstd = tf.constant(1.0 / std, dtype=tf.float32)
image = (image - image_mean) * image_invstd
return image
def get_norm(zero_init=False):
if cfg.BACKBONE.NORM == 'None':
return lambda x: x
if cfg.BACKBONE.NORM == 'GN':
Norm = GroupNorm
layer_name = 'gn'
else:
Norm = BatchNorm
layer_name = 'bn'
return lambda x: Norm(layer_name, x, gamma_initializer=tf.zeros_initializer() if zero_init else None)
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
n_in = l.shape[1]
if n_in != n_out: # change dimension when channel is not the same
# TF's SAME mode output ceil(x/stride), which is NOT what we want when x is odd and stride is 2
# In FPN mode, the images are pre-padded already.
if not cfg.MODE_FPN and stride == 2:
l = l[:, :, :-1, :-1]
return Conv2D('convshortcut', l, n_out, 1,
strides=stride, activation=activation)
else:
return l
def resnet_bottleneck(l, ch_out, stride):
shortcut = l
if cfg.BACKBONE.STRIDE_1X1:
if stride == 2:
l = l[:, :, :-1, :-1]
l = Conv2D('conv1', l, ch_out, 1, strides=stride)
l = Conv2D('conv2', l, ch_out, 3, strides=1)
else:
l = Conv2D('conv1', l, ch_out, 1, strides=1)
if stride == 2:
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = Conv2D('conv2', l, ch_out, 3, strides=2, padding='VALID')
else:
l = Conv2D('conv2', l, ch_out, 3, strides=stride)
if cfg.BACKBONE.NORM != 'None':
l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_norm(zero_init=True))
else:
l = Conv2D('conv3', l, ch_out * 4, 1, activation=tf.identity,
kernel_initializer=tf.constant_initializer())
ret = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_norm(zero_init=False))
return tf.nn.relu(ret, name='output')
def resnet_group(name, l, block_func, features, count, stride):
with tf.variable_scope(name):
for i in range(0, count):
with tf.variable_scope('block{}'.format(i)):
l = block_func(l, features, stride if i == 0 else 1)
return l
def resnet_c4_backbone(image, num_blocks):
assert len(num_blocks) == 3
freeze_at = cfg.BACKBONE.FREEZE_AT
with backbone_scope(freeze=freeze_at > 0):
l = tf.pad(image, [[0, 0], [0, 0], maybe_reverse_pad(2, 3), maybe_reverse_pad(2, 3)])
l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
with backbone_scope(freeze=freeze_at > 1):
c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
with backbone_scope(freeze=False):
c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
# 16x downsampling up to now
return c4
@auto_reuse_variable_scope
def resnet_conv5(image, num_block):
with backbone_scope(freeze=False):
l = resnet_group('group3', image, resnet_bottleneck, 512, num_block, 2)
return l
def resnet_fpn_backbone(image, num_blocks):
freeze_at = cfg.BACKBONE.FREEZE_AT
shape2d = tf.shape(image)[2:]
mult = float(cfg.FPN.RESOLUTION_REQUIREMENT)
new_shape2d = tf.cast(tf.ceil(tf.cast(shape2d, tf.float32) / mult) * mult, tf.int32)
pad_shape2d = new_shape2d - shape2d
assert len(num_blocks) == 4, num_blocks
with backbone_scope(freeze=freeze_at > 0):
chan = image.shape[1]
pad_base = maybe_reverse_pad(2, 3)
l = tf.pad(image, tf.stack(
[[0, 0], [0, 0],
[pad_base[0], pad_base[1] + pad_shape2d[0]],
[pad_base[0], pad_base[1] + pad_shape2d[1]]]))
l.set_shape([None, chan, None, None])
l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
l = tf.pad(l, [[0, 0], [0, 0], maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
with backbone_scope(freeze=freeze_at > 1):
c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
with backbone_scope(freeze=False):
c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
c5 = resnet_group('group3', c4, resnet_bottleneck, 512, num_blocks[3], 2)
# 32x downsampling up to now
# size of c5: ceil(input/32)
return c2, c3, c4, c5
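# Usage sketch (illustrative, not part of the original file): given an NCHW float32
# image tensor that has been passed through image_preprocess(), the block counts
# select the ResNet depth, e.g.
#
#   c2, c3, c4, c5 = resnet_fpn_backbone(image, [3, 4, 6, 3])    # ResNet-50 FPN
#   c2, c3, c4, c5 = resnet_fpn_backbone(image, [3, 4, 23, 3])   # ResNet-101 FPN
#   c4 = resnet_c4_backbone(image, [3, 4, 6])                    # ResNet-50 C4 mode
#
# How much of the backbone is frozen is governed by cfg.BACKBONE.FREEZE_AT as above.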
|
the-stack_106_13910
|
# package for doing multi threading on specific apis
from concurrent.futures import ThreadPoolExecutor
from tsv_data_analytics import tsv
from tsv_data_analytics import tsvutils
from tsv_data_analytics import utils
import math
import time
class MultiThreadTSV(tsv.TSV):
def __init__(self, header, data, num_par = 1, status_check_interval_sec = 10, sleep_interval_sec = 0.01):
super().__init__(header, data)
self.num_par = num_par
self.status_check_interval_sec = status_check_interval_sec
self.sleep_interval_sec = sleep_interval_sec
# check if num_par is more than number of rows
if (self.num_rows() < self.num_par):
utils.warn("MultiThreadTSV: num_rows: {} < num_par: {}. Adjusting the value".format(self.num_rows(), self.num_par))
self.num_par = self.num_rows()
def parallelize(self, func, *args, **kwargs):
# debug
utils.debug("parallelize: func: {}, args: {}, kwargs: {}".format(func, *args, **kwargs))
# split the data into num_par partitions
batch_size = int(math.ceil(self.num_rows() / self.num_par))
future_results = []
# take start_time
ts_start = time.time()
# check for single threaded
if (self.num_par == 1):
combined_result = __parallelize__(self, func, *args, **kwargs)
else:
# run thread pool
with ThreadPoolExecutor(max_workers = self.num_par) as executor:
# execute batches concurrently based on num_par and batch_size
for i in range(self.num_par):
batch_i = self.skip(batch_size * i).take(batch_size)
future_results.append(executor.submit(__parallelize__, batch_i, func, *args, **kwargs))
# run while loop
while True:
done_count = 0
for f in future_results:
if (f.done() == True):
done_count = done_count + 1
# debug
utils.debug("parallelize: done_count: {}, total: {}".format(done_count, len(future_results)))
# check if all are done
if (done_count < len(future_results)):
# sleep for some additional time to allow notebook stop method to work
utils.debug("parallelize: futures not completed yet. Sleeping for {} seconds".format(self.status_check_interval_sec))
time.sleep(self.status_check_interval_sec)
else:
break
# combine the results
results = []
for f in future_results:
results.append(f.result())
# merge the tsvs using a common union.
combined_result = tsvutils.merge(results, merge_def_vals = {})
# take end_time
ts_end = time.time()
utils.debug("parallelize: time taken: {} sec".format((ts_end - ts_start)))
return combined_result
def __parallelize__(xtsv, func, *args, **kwargs):
return func(xtsv, *args, **kwargs)
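# Usage sketch (illustrative, not part of the original module; `header`, `data` and
# `my_transform` are placeholders): the worker function must take a TSV as its first
# argument and return a TSV so that the per-batch results can be merged.
#
#   def my_transform(xtsv, suffix):
#       # ... transform and return a tsv.TSV here
#       return xtsv
#
#   mt = MultiThreadTSV(header, data, num_par = 4)
#   result = mt.parallelize(my_transform, "v1")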
|
the-stack_106_13911
|
# !/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from polyaxon import logger
from polyaxon.tracking.events.events_processors import metrics_dict_to_list
try:
import psutil
except ImportError:
psutil = None
try:
import pynvml
except ImportError:
pynvml = None
def can_log_gpu_resources():
if pynvml is None:
return False
try:
pynvml.nvmlInit()
return True
except pynvml.NVMLError:
return False
def query_gpu(handle: int) -> Dict:
memory = pynvml.nvmlDeviceGetMemoryInfo(handle) # in Bytes
utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
return {
"gpu_{}_memory_free".format(handle): int(memory.free),
"gpu_{}_memory_used".format(handle): int(memory.used),
"gpu_{}_utilization".format(handle): utilization.gpu,
}
def get_gpu_metrics() -> List:
try:
pynvml.nvmlInit()
device_count = pynvml.nvmlDeviceGetCount()
results = []
for i in range(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
results += metrics_dict_to_list(query_gpu(handle))
return results
except pynvml.NVMLError:
logger.debug("Failed to collect gpu resources", exc_info=True)
return []
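# Illustrative self-check (not part of the original module): prints the collected
# metrics only when pynvml is importable and an NVIDIA driver is present; otherwise
# get_gpu_metrics() simply returns an empty list.
if __name__ == "__main__":
    if can_log_gpu_resources():
        for gpu_metric in get_gpu_metrics():
            print(gpu_metric)
    else:
        print("GPU metrics not available on this host")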
|
the-stack_106_13912
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn_scratch.py',
'../_base_/datasets/cityscapes_detection_durand.py',
'../_base_/default_runtime.py'
]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
evaluation = dict(interval=1, classwise=True)
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.02,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.1,
step=[11])
runner = dict(
type='EpochBasedRunner', max_epochs=13)
log_config = dict(
interval=100,
hooks=[
dict(type='TensorboardLoggerHook'),
dict(type='TextLoggerHook'),
]
)
work_dir = "/home/ihakki/h3dr/experiments/faster_rcnn_optexp/run_scratch_8"
gpu_ids = range(0, 1)
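# Usage sketch (illustrative, not part of the original config): mmdetection configs
# like this one are passed to the standard training entry point, e.g.
#   python tools/train.py <path-to-this-config>.py
# with work_dir and gpu_ids above standing in for the usual command-line overrides.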
|
the-stack_106_13913
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_convert_to_and_from_dict_async.py
DESCRIPTION:
This sample demonstrates how to convert models returned from an analyze operation
to and from a dictionary. The dictionary in this sample is then converted to a
JSON file, then the same dictionary is converted back to its original model.
USAGE:
python sample_convert_to_and_from_dict_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
import json
import asyncio
async def convert_to_and_from_dict_async():
path_to_sample_documents = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"..",
"..",
"./sample_forms/forms/Form_1.jpg",
)
)
from azure.core.serialization import AzureJSONEncoder
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
from azure.ai.formrecognizer import AnalyzeResult
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with document_analysis_client:
with open(path_to_sample_documents, "rb") as f:
poller = await document_analysis_client.begin_analyze_document(
"prebuilt-document", document=f
)
result = await poller.result()
# convert the received model to a dictionary
analyze_result_dict = result.to_dict()
# save the dictionary as JSON content in a JSON file, use the AzureJSONEncoder
# to help make types, such as dates, JSON serializable
# NOTE: AzureJSONEncoder is only available with azure.core>=1.18.0.
with open('data.json', 'w') as f:
json.dump(analyze_result_dict, f, cls=AzureJSONEncoder)
# convert the dictionary back to the original model
model = AnalyzeResult.from_dict(analyze_result_dict)
# use the model as normal
print("----Converted from dictionary AnalyzeResult----")
print("Model ID: '{}'".format(model.model_id))
print("Number of pages analyzed {}".format(len(model.pages)))
print("API version used: {}".format(model.api_version))
print("----------------------------------------")
async def main():
await convert_to_and_from_dict_async()
if __name__ == '__main__':
asyncio.run(main())
|
the-stack_106_13914
|
# Encoding: UTF-8
u"""Automatic documentation generation for pokédex tables
This adds a "dex-table" directive to Sphinx, which works like "autoclass",
but documents Pokédex mapped classes.
"""
# XXX: This assumes all the tables are in pokedex.db.tables
import functools
import textwrap
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.util.compat import Directive, make_admonition
from sphinx.locale import _
from sphinx.domains.python import PyClasslike
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.ext.autodoc import ClassLevelDocumenter
from sqlalchemy import types
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.orm import Mapper, configure_mappers
from sqlalchemy.ext.associationproxy import AssociationProxy
from pokedex.db.markdown import MoveEffectPropertyMap, MoveEffectProperty
from pokedex.db import tables, markdown
# Make sure all the backrefs are in place
configure_mappers()
column_to_cls = {}
for cls in tables.mapped_classes:
for column in cls.__table__.c:
column_to_cls[column] = cls
class dextabledoc(nodes.Admonition, nodes.Element):
pass
def visit_todo_node(self, node):
self.visit_admonition(node)
def depart_todo_node(self, node):
self.depart_admonition(node)
def column_type_str(column):
"""Extract the type name from a SQLA column
"""
type_ = column.type
# We're checking the specific type here: no issubclass
if type(type_) in (types.Integer, types.SmallInteger):
return 'int'
if type(type_) == types.Boolean:
return 'bool'
if type(type_) == types.Unicode:
return u'unicode – %s' % column.info['format']
if type(type_) == types.UnicodeText:
return u'unicode – %s' % column.info['format']
if type(type_) == types.Enum:
return 'enum: [%s]' % ', '.join(type_.enums)
if type(type_) == markdown.MarkdownColumn:
return 'markdown'
raise ValueError(repr(type_))
common_columns = 'id identifier name'.split()
def column_header(c, class_name=None, transl_name=None, show_type=True,
relation=None, relation_name=None):
"""Return the column header for the given column"""
result = []
if relation_name:
name = relation_name
else:
name = c.name
if class_name:
result.append(u'%s.\ **%s**' % (class_name, name))
else:
result.append(u'**%s**' % c.name)
if c.foreign_keys:
for fk in c.foreign_keys:
if fk.column in column_to_cls:
foreign_cls = column_to_cls[fk.column]
if relation_name and relation_name + '_id' == c.name:
result.append(u'(%s →' % c.name)
elif relation_name:
result.append(u'(**%s** →' % c.name)
else:
result.append(u'(→')
result.append(u':class:`~pokedex.db.tables.%s`.%s)' % (
foreign_cls.__name__,
fk.column.name
))
break
elif show_type:
result.append(u'(*%s*)' % column_type_str(c))
if transl_name:
result.append(u'via *%s*' % transl_name)
return ' '.join(result)
def with_header(header=None):
"""Decorator that adds a section header if there's a any output
The decorated function should yield output lines; if there are any the
header gets added.
"""
def wrap(func):
@functools.wraps(func)
def wrapped(cls, remaining_attrs):
result = list(func(cls, remaining_attrs))
if result:
# Sphinx/ReST doesn't allow "-----" just anywhere :(
yield u''
yield u'.. raw:: html'
yield u''
yield u' <hr>'
yield u''
if header:
yield header + u':'
yield u''
for row in result:
yield row
return wrapped
return wrap
### Section generation functions
def generate_table_header(cls, remaining_attrs):
first_line, sep, next_lines = unicode(cls.__doc__).partition(u'\n')
yield first_line
for line in textwrap.dedent(next_lines).split('\n'):
yield line
yield ''
yield u'Table name: *%s*' % cls.__tablename__
try:
yield u'(single: *%s*)' % cls.__singlename__
except AttributeError:
pass
yield u''
yield u'Primary key: %s.' % u', '.join(
u'**%s**' % col.key for col in cls.__table__.primary_key.columns)
yield u''
def generate_common(cls, remaining_attrs):
common_col_headers = []
for c in cls.__table__.c:
if c.name in common_columns:
common_col_headers.append(column_header(c, show_type=False))
remaining_attrs.remove(c.name)
for translation_class in cls.translation_classes:
for c in translation_class.__table__.c:
if c.name in common_columns:
common_col_headers.append(column_header(c, None,
translation_class.__table__.name, show_type=False))
remaining_attrs.remove(c.name)
if common_col_headers:
if len(common_col_headers) > 1:
common_col_headers[-1] = 'and ' + common_col_headers[-1]
if len(common_col_headers) > 2:
separator = u', '
else:
separator = u' '
yield u'Has'
yield separator.join(common_col_headers) + '.'
yield u''
@with_header(u'Columns')
def generate_columns(cls, remaining_attrs):
name = cls.__name__
for c in [c for c in cls.__table__.c if c.name not in common_columns]:
remaining_attrs.remove(c.name)
relation_name = c.name[:-3]
if c.name.endswith('_id') and relation_name in remaining_attrs:
relation = getattr(cls, relation_name)
yield column_header(c, name,
relation=relation, relation_name=relation_name)
remaining_attrs.remove(relation_name)
else:
yield column_header(c, name) + ':'
yield u''
if c.doc:
yield u' ' + unicode(c.doc)
yield u''
@with_header(u'Internationalized strings')
def generate_strings(cls, remaining_attrs):
for translation_class in cls.translation_classes:
for c in translation_class.__table__.c:
if 'format' in c.info:
remaining_attrs.discard(c.name)
remaining_attrs.discard(c.name + '_map')
if c.name in common_columns:
continue
yield column_header(c, cls.__name__,
translation_class.__table__.name)
yield u''
if c.doc:
yield u' ' + unicode(c.doc)
yield u''
@with_header(u'Relationships')
def generate_relationships(cls, remaining_attrs):
def isrelationship(prop):
return isinstance(prop, InstrumentedAttribute) and isinstance(prop.property, RelationshipProperty)
for attr_name in sorted(remaining_attrs):
prop = getattr(cls, attr_name)
if not isrelationship(prop):
continue
rel = prop.property
yield u'%s.\ **%s**' % (cls.__name__, attr_name)
class_name = u':class:`~pokedex.db.tables.%s`' % rel.mapper.class_.__name__
if rel.uselist:
class_name = u'[%s]' % class_name
yield u'(→ %s)' % class_name
if rel.doc:
yield u''
yield u' ' + unicode(rel.doc)
if rel.secondary is not None:
yield u''
yield ' Association table: ``%s``' % rel.secondary
#if rel.primaryjoin is not None:
# yield u''
# yield ' Join condition: ``%s``' % rel.primaryjoin
# if rel.secondaryjoin is not None:
# yield ' , ``%s``' % rel.secondaryjoin
if rel.order_by:
yield u''
yield u' '
yield ' Ordered by: ' + u', '.join(
u'``%s``' % o for o in rel.order_by)
yield u''
remaining_attrs.remove(attr_name)
@with_header(u'Association Proxies')
def generate_associationproxies(cls, remaining_attrs):
for attr_name in sorted(remaining_attrs):
prop = getattr(cls, attr_name)
if isinstance(prop, AssociationProxy):
yield u'%s.\ **%s**:' % (cls.__name__, attr_name)
yield '``{prop.remote_attr.key}`` of ``self.{prop.local_attr.key}``'.format(
prop=prop)
yield u''
remaining_attrs.remove(attr_name)
@with_header(u'Undocumented')
def generate_undocumented(cls, remaining_attrs):
for c in sorted([c for c in remaining_attrs if isinstance(getattr(cls, c),
(InstrumentedAttribute, AssociationProxy,
MoveEffectPropertyMap, MoveEffectProperty))]):
yield u''
yield u'%s.\ **%s**' % (cls.__name__, c)
remaining_attrs.remove(c)
@with_header(None)
def generate_other(cls, remaining_attrs):
for c in sorted(remaining_attrs):
yield u''
member = getattr(cls, c)
if callable(member):
yield '.. automethod:: %s.%s' % (cls.__name__, c)
else:
yield '.. autoattribute:: %s.%s' % (cls.__name__, c)
yield u''
remaining_attrs.clear()
class DexTable(PyClasslike):
"""The actual Sphinx documentation generation whatchamacallit
"""
doc_field_types = [
TypedField('field', label='Fields',
typerolename='obj', typenames=('fieldname', 'type')),
]
def get_signature_prefix(self, sig):
return ''
#return u'mapped class '
def run(self):
section = nodes.section()
super_result = super(DexTable, self).run()
title_text = self.names[0][0]
section += nodes.title(text=title_text)
section += super_result
section['ids'] = ['dex-table-%s' % title_text.lower()]
return [section]
def before_content(self):
name = self.names[0][0]
for cls in tables.mapped_classes:
if name == cls.__name__:
break
else:
raise ValueError('Table %s not found' % name)
table = cls.__table__
remaining_attrs = set(x for x in dir(cls) if not x.startswith('_'))
remaining_attrs.difference_update(['metadata', 'translation_classes',
'add_relationships', 'summary_column'])
for transl_class in cls.translation_classes:
remaining_attrs.difference_update([
transl_class.relation_name,
transl_class.relation_name + '_table',
transl_class.relation_name + '_local',
])
generated_content = [] # Just a list of lines!
generated_content.extend(generate_table_header(cls, remaining_attrs))
generated_content.extend(generate_common(cls, remaining_attrs))
generated_content.extend(generate_columns(cls, remaining_attrs))
generated_content.extend(generate_strings(cls, remaining_attrs))
generated_content.extend(generate_relationships(cls, remaining_attrs))
generated_content.extend(generate_associationproxies(cls, remaining_attrs))
generated_content.extend(generate_undocumented(cls, remaining_attrs))
generated_content.extend(generate_other(cls, remaining_attrs))
generated_content.append(u'')
self.content = ViewList(generated_content + list(self.content))
return super(DexTable, self).before_content()
def get_index_text(self, modname, name_cls):
return '%s (mapped class)' % name_cls[0]
def setup(app):
app.add_directive('dex-table', DexTable)
# XXX: Specify that this depends on pokedex.db.tables ...?
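# Example use from a Sphinx .rst document (illustrative, not part of the original
# module):
#
#   .. dex-table:: Pokemon
#
# renders the generated section for the ``Pokemon`` mapped class; the argument must
# match a class name in pokedex.db.tables, otherwise before_content() raises
# ValueError.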
|
the-stack_106_13915
|
# encoding: utf-8
# This file contains commonly used parts of external libraries. The idea is
# to help in removing helpers from being used as a dependency by many files
# but at the same time making it easy to change for example the json lib
# used.
#
# NOTE: This file is specifically created for
# from ckan.common import x, y, z to be allowed
from __future__ import annotations
import logging
from collections.abc import MutableMapping
from typing import (
Any, Iterable, Optional, TYPE_CHECKING,
TypeVar, cast, overload, Container, Union)
from typing_extensions import Literal
import flask
from werkzeug.local import Local, LocalProxy
from flask_babel import (gettext as flask_ugettext,
ngettext as flask_ungettext)
import simplejson as json # type: ignore # noqa: re-export
import ckan.lib.maintain as maintain
from ckan.config.declaration import Declaration, Key
if TYPE_CHECKING:
# starting from Python 3.7 the following line can be used without any
# conditions, thanks to the `annotations` import from `__future__`
MutableMapping = MutableMapping[str, Any]
log = logging.getLogger(__name__)
current_app = flask.current_app
@maintain.deprecated('All web requests are served by Flask', since="2.10.0")
def is_flask_request():
u'''
This function is deprecated. All CKAN requests are now served by Flask
'''
return True
def streaming_response(data: Iterable[Any],
mimetype: str = u'application/octet-stream',
with_context: bool = False) -> flask.Response:
iter_data = iter(data)
if with_context:
iter_data = flask.stream_with_context(iter_data)
resp = flask.Response(iter_data, mimetype=mimetype)
return resp
def ugettext(*args: Any, **kwargs: Any) -> str:
return cast(str, flask_ugettext(*args, **kwargs))
_ = ugettext
def ungettext(*args: Any, **kwargs: Any) -> str:
return cast(str, flask_ungettext(*args, **kwargs))
class CKANConfig(MutableMapping):
u'''Main CKAN configuration object
This is a dict-like object that also proxies any changes to the
Flask configuration object.
The actual `config` instance in this module is initialized in the
`load_environment` method with the values of the ini file or env vars.
'''
store: dict[str, Any]
def __init__(self, *args: Any, **kwargs: Any):
self.store = dict()
self.update(dict(*args, **kwargs))
def __getitem__(self, key: str):
return self.store[key]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __repr__(self):
return self.store.__repr__()
def copy(self) -> dict[str, Any]:
return self.store.copy()
def clear(self) -> None:
self.store.clear()
try:
flask.current_app.config.clear()
except RuntimeError:
pass
def __setitem__(self, key: str, value: Any):
self.store[key] = value
try:
flask.current_app.config[key] = value
except RuntimeError:
pass
def __delitem__(self, key: str):
del self.store[key]
try:
del flask.current_app.config[key]
except RuntimeError:
pass
def get_value(self, key: str) -> Any:
if self.get("config.mode") == "strict":
return self[key]
option = config_declaration.get(key)
if not option:
log.warning("Option %s is not declared", key)
return self.get(key)
value = self.get(key, option.default)
return option._normalize(value)
def subset(
self, pattern: Key,
exclude: Optional[Container[Union[str, Key]]] = None
) -> dict[str, Any]:
subset = {}
exclude = exclude or set()
for k, v in self.store.items():
if k in exclude or pattern != k:
continue
subset[k] = v
return subset
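# Illustrative sketch (not taken from CKAN docs): CKANConfig acts as a plain
# dict and mirrors writes into flask.current_app.config when an app context is
# active; outside a context the RuntimeError is swallowed and only the local
# store is updated.
#
#   cfg = CKANConfig()
#   cfg["ckan.site_title"] = "Demo site"   # key name used only as an example
#   assert cfg["ckan.site_title"] == "Demo site"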
def _get_request():
return flask.request
class CKANRequest(LocalProxy):
u'''Common request object
This is just a wrapper around LocalProxy so we can handle some special
cases for backwards compatibility.
'''
@property
@maintain.deprecated('Use `request.args` instead of `request.params`',
since="2.10.0")
def params(self):
'''This property is deprecated.
Special case as Pylons' request.params is used all over the place. All
new code meant to be run just in Flask (eg views) should always use
request.args
'''
return cast(flask.Request, self).args
def _get_c():
return flask.g
def _get_session():
return flask.session
def asbool(obj: Any) -> bool:
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in truthy:
return True
elif obj in falsy:
return False
else:
raise ValueError(u"String is not true/false: {}".format(obj))
return bool(obj)
def asint(obj: Any) -> int:
try:
return int(obj)
except (TypeError, ValueError):
raise ValueError(u"Bad integer value: {}".format(obj))
T = TypeVar('T')
SequenceT = TypeVar('SequenceT', "list[Any]", "tuple[Any]")
@overload
def aslist(obj: str,
sep: Optional[str] = None,
strip: bool = True) -> list[str]:
...
@overload
def aslist(obj: list[T],
sep: Optional[str] = None,
strip: bool = True) -> list[T]:
...
@overload
def aslist(obj: tuple[T],
sep: Optional[str] = None,
strip: bool = True) -> tuple[T]:
...
@overload
def aslist(obj: SequenceT,
sep: Optional[str] = None,
strip: bool = True) -> SequenceT:
...
@overload
def aslist(obj: Literal[None],
sep: Optional[str] = None,
strip: bool = True) -> list[str]:
...
def aslist(obj: Any, sep: Optional[str] = None, strip: bool = True) -> Any:
if isinstance(obj, str):
lst = obj.split(sep)
if strip:
lst = [v.strip() for v in lst]
return lst
elif isinstance(obj, (list, tuple)):
return cast(Any, obj)
elif obj is None:
return []
else:
return [obj]
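# Illustrative behaviour of the coercion helpers above (derived from their
# definitions, shown here only as examples):
#
#   asbool("yes")               -> True
#   asbool("off")               -> False
#   asint("42")                 -> 42
#   aslist("a, b, c", sep=",")  -> ["a", "b", "c"]   (strip=True by default)
#   aslist(None)                -> []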
local = Local()
# This a proxy to the bounded config object
local(u'config')
# Thread-local safe objects
config = local.config = CKANConfig()
local("config_declaration")
config_declaration = local.config_declaration = Declaration()
# Proxies to already thread-local safe objects
request = cast(flask.Request, CKANRequest(_get_request))
# Provide a `c` alias for `g` for backwards compatibility
g: Any = LocalProxy(_get_c)
c = g
session: Any = LocalProxy(_get_session)
truthy = frozenset([u'true', u'yes', u'on', u'y', u't', u'1'])
falsy = frozenset([u'false', u'no', u'off', u'n', u'f', u'0'])
|
the-stack_106_13916
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test seq-to-seq training."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import functools
import os
import random
import sys
from absl.testing import absltest
from flax import jax_utils
from flax import optim
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer import models
from latent_programmer import train_lib
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
from latent_programmer.tasks.robust_fill.dataset import input_pipeline
gfile = tf.io.gfile
sys.path.append('..')
class TrainTest(absltest.TestCase):
def test_train(self):
tf.enable_v2_behavior()
tf.random.set_seed(0)
np.random.seed(0)
random.seed(0)
dataset_filepattern = os.path.join(
os.path.dirname(__file__),
'tasks/robust_fill/dataset/test_dataset/program_tasks.tf_records-*')
print('dataset_filepattern = {}'.format(dataset_filepattern))
batch_size = 4
num_strings_per_task = 4
max_characters = 10
max_program_length = 15
# Build token tables.
id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)}
char_id_table = {char: id for id, char in id_char_table.items()}
_, token_id_table = dsl_tokens.build_token_tables()
io_vocab_size = len(char_id_table) + 1 # For padding.
program_vocab_size = len(token_id_table) + 1
bos_token = token_id_table[dsl.BOS]
# Load dataset.
dataset = input_pipeline.create_dataset_from_tf_record(
dataset_filepattern, token_id_table, char_id_table)
dataset = dataset.padded_batch(
batch_size,
padded_shapes=((num_strings_per_task, max_characters),
(num_strings_per_task, max_characters),
(max_program_length,)),
drop_remainder=True)
dataset_iter = dataset.repeat().as_numpy_iterator()
train_config = models.TransformerConfig(
vocab_size=io_vocab_size,
output_vocab_size=program_vocab_size,
shift=True,
emb_dim=32,
num_heads=4,
num_layers=2,
qkv_dim=32,
mlp_dim=32,
max_len=max(max_characters, max_program_length),
deterministic=False,
decode=False,
bos_token=bos_token)
eval_config = train_config.replace(deterministic=True)
rng = jax.random.PRNGKey(0)
rng, init_rng = jax.random.split(rng)
m = models.ProgramTransformer(eval_config)
initial_variables = jax.jit(m.init)(
init_rng,
jnp.ones((batch_size, num_strings_per_task, max_characters),
jnp.float32),
jnp.ones((batch_size, num_strings_per_task, max_characters),
jnp.float32),
jnp.ones((batch_size, max_program_length), jnp.float32))
optimizer_def = optim.Adam(
1e-2,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=0.1)
optimizer = optimizer_def.create(initial_variables['params'])
del initial_variables # Don't keep a copy of the initial model.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_lib.create_learning_rate_scheduler(
base_learning_rate=1e-2)
p_train_step = jax.pmap(
functools.partial(
train_lib.train_step,
learning_rate_fn=learning_rate_fn,
config=train_config),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(train_lib.eval_step, config=eval_config),
axis_name='batch')
# Training loop.
start_step = 0
rngs = jax.random.split(rng, jax.local_device_count())
del rng
for _ in range(start_step, 1000):
inputs, outputs, programs = common_utils.shard(next(dataset_iter))
optimizer, _, rngs = p_train_step(
optimizer, inputs, outputs, programs, train_rng=rngs)
# Evaluation.
eval_metrics = []
for batches in dataset.as_numpy_iterator():
inputs, outputs, programs = common_utils.shard(batches)
metrics = p_eval_step(optimizer.target, inputs, outputs, programs)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
if jax.host_id() == 0:
self.assertGreater(eval_summary['accuracy'], 0.1)
if __name__ == '__main__':
absltest.main()
|
the-stack_106_13917
|
from pathlib import Path
import time
import subprocess
import os
import stat
from abc import ABC, abstractmethod
import pypipegraph as ppg
from .util import lazy_property, sort_versions
_global_store = None
def change_global_store(new_store):
global _global_store
_global_store = new_store
def get_global_store():
return _global_store
class DownloadDiscrepancyException(ValueError):
pass
def reproducible_tar(target_tar, folder, cwd):
"""Create tars that look the same every time."""
# see http://h2.jaguarpaw.co.uk/posts/reproducible-tar/
target_tar = str(target_tar)
folder = str(folder)
cmd = [
"tar",
"--format=posix",
"--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime,delete=mtime",
"--mtime=1970-01-01 00:00:00Z",
"--sort=name",
"--numeric-owner",
"--owner=0",
"--group=0",
"--mode=go+rwX,u+rwX",
"-cvf",
target_tar,
folder,
]
subprocess.check_call(cmd, cwd=cwd)
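# Illustrative call (paths are made up):
#
#   reproducible_tar("my_algo__1.0.tar.gz", "my_algo-1.0/", cwd="/tmp/build")
#
# Running it twice over identical content should produce byte-identical
# archives, which keeps the md5 comparison done in
# ExternalAlgorithm._check_hash_against_others meaningful across machines.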
class ExternalAlgorithm(ABC):
"""Together with an ExternalAlgorithmStore (or the global one),
ExternalAlgorithm encapsulates a callable algorithm such as a high throughput aligner.
"""
def __init__(self, version="_last_used", store=None, **kwargs):
"""
Parameters
----------
version: str
either one of the available versions from the store,
_latest (always the latest!) or
_last_used - the last used one, or the newest if this is the first time
(stored in '.mbf_external_versions')
"""
super().__init__(**kwargs)
if store is None:
store = _global_store
self.store = store
if version == "_last_used":
actual_version = self._last_used_version
if actual_version is None:
actual_version = "_latest"
else:
actual_version = version
if actual_version == "_latest":
self.version = self.get_latest_version()
self._fetch_and_check(self.version)
elif actual_version == "_fetching": # pragma: no cover
self.version = "_fetching"
else:
if actual_version in store.get_available_versions(self.name):
self.version = actual_version
else:
self._fetch_and_check(actual_version)
self.version = actual_version
self._store_used_version()
self.path = store.get_unpacked_path(self.name, self.version)
@lazy_property
def _last_used_version(self):
try:
lines = Path(".mbf_external_versions").read_text().strip().split("\n")
for l in lines:
if l.strip():
name, version = l.split("==")
if name == self.name:
return version
except OSError:
pass
return None
def _store_used_version(self):
last_used = self._last_used_version
if (
last_used is None
or sort_versions([last_used, self.version])[0] == last_used
):
try:
p = Path(".mbf_external_versions")
lines = p.read_text().strip().split("\n")
lines = [x for x in lines if not x.startswith(self.name + "==")]
except OSError:
lines = []
lines.append(f"{self.name}=={self.version}")
p.write_text("\n".join(lines) + "\n")
@property
@abstractmethod
def name(self):
pass # pragma: no cover
@abstractmethod
def build_cmd(self, output_directory, ncores, arguments):
pass # pragma: no cover
@property
def multi_core(self):
return False
def run(
self,
output_directory,
arguments=None,
cwd=None,
call_afterwards=None,
additional_files_created=None,
):
"""Return a job that runs the algorithm and puts the
results in output_directory.
Note that assigning different output_directories to different
versions is your problem.
"""
output_directory = Path(output_directory)
output_directory.mkdir(parents=True, exist_ok=True)
sentinel = output_directory / "sentinel.txt"
filenames = [sentinel]
if additional_files_created:
if isinstance(additional_files_created, (str, Path)):
additional_files_created = [additional_files_created]
filenames.extend(additional_files_created)
job = ppg.MultiFileGeneratingJob(
filenames,
self.get_run_func(
output_directory, arguments, cwd=cwd, call_afterwards=call_afterwards
),
).depends_on(
ppg.FileChecksumInvariant(
self.store.get_zip_file_path(self.name, self.version)
),
ppg.FunctionInvariant(str(sentinel) + "_call_afterwards", call_afterwards),
)
job.ignore_code_changes()
job.depends_on(
ppg.FunctionInvariant(
job.job_id + "_build_cmd_func", self.__class__.build_cmd
)
)
if self.multi_core:
job.cores_needed = -1
return job
def get_run_func(self, output_directory, arguments, cwd=None, call_afterwards=None):
def do_run():
self.store.unpack_version(self.name, self.version)
sentinel = output_directory / "sentinel.txt"
stdout = output_directory / "stdout.txt"
stderr = output_directory / "stderr.txt"
cmd_out = output_directory / "cmd.txt"
op_stdout = open(stdout, "wb")
op_stderr = open(stderr, "wb")
cmd = [
str(x)
for x in self.build_cmd(
output_directory,
ppg.util.global_pipegraph.rc.cores_available
if self.multi_core
else 1,
arguments,
)
]
cmd_out.write_text(repr(cmd))
start_time = time.time()
print(" ".join(cmd))
p = subprocess.Popen(cmd, stdout=op_stdout, stderr=op_stderr, cwd=cwd)
p.communicate()
op_stdout.close()
op_stderr.close()
ok = self.check_success(
p.returncode, stdout.read_bytes(), stderr.read_bytes()
)
if ok is True:
runtime = time.time() - start_time
sentinel.write_text(
f"run time: {runtime:.2f} seconds\nreturn code: {p.returncode}"
)
if call_afterwards is not None:
call_afterwards()
else:
raise ValueError(
f"{self.name} run failed. Error was: {ok}. Cmd was: {cmd}"
)
return do_run
def check_success(self, return_code, stdout, stderr):
if return_code == 0:
return True
else:
return f"Return code != 0: {return_code}"
def _fetch_and_check(self, version):
if self.store.no_downloads:
print("WARNING: Downloads disabled for this store")
return
target_filename = self.store.get_zip_file_path(self.name, version).absolute()
if target_filename.exists():
return
self.fetch_version(version, target_filename)
try:
checksum = ppg.util.checksum_file(target_filename)
except OSError: # pragma: no cover
raise ValueError("Algorithm did not download correctly")
md5_file = target_filename.with_name(target_filename.name + ".md5sum")
st = os.stat(target_filename)
with open(md5_file, "wb") as op:
op.write(checksum.encode("utf-8"))
os.utime(md5_file, (st[stat.ST_MTIME], st[stat.ST_MTIME]))
self._check_hash_against_others(target_filename, checksum)
def _check_hash_against_others(self, target_filename, checksum):
"""See if another machine has downloaded the file and synced it's mbf_store.
If so, look at it's hash. If it differs, throw an Exception"""
search_path = self.store.zip_path.absolute().parent.parent.parent
print(search_path)
search_key = "**/" + self.store.zip_path.name + "/" + target_filename.name
by_hash = {checksum: [target_filename]}
for found in search_path.glob(search_key):
print("found", found)
if found != target_filename:
cs = ppg.util.checksum_file(found)
if not cs in by_hash:
by_hash[cs] = []
by_hash[cs].append(found)
if len(by_hash) > 1:
import pprint
pprint.pprint(by_hash)
raise DownloadDiscrepancyException(
f"Found multiple different {target_filename.name} with different md5sum. Investitage and fix (possibly using reproducible_tar), please."
)
def fetch_version(self, version, target_filename): # pragma: no cover
# overwrite this in the downstream algorithms
raise NotImplementedError()
pass
class ExternalAlgorithmStore:
def __init__(self, zip_path, unpack_path, no_downloads=False):
self.zip_path = Path(zip_path)
self.unpack_path = Path(unpack_path)
self.no_downloads = no_downloads
self._version_cache = {}
def get_available_versions(self, algorithm_name):
if (
not algorithm_name in self._version_cache
or not self._version_cache[algorithm_name]
):
glob = f"{algorithm_name}__*.tar.gz"
matching = list(self.zip_path.glob(glob))
versions = [x.stem[x.stem.find("__") + 2 : -4] for x in matching]
self._version_cache[algorithm_name] = sort_versions(versions)
return self._version_cache[algorithm_name]
def unpack_version(self, algorithm_name, version):
if not version in self.get_available_versions(algorithm_name):
raise ValueError("No such version")
target_path = self.get_unpacked_path(algorithm_name, version)
sentinel = target_path / "unpack_done.txt"
if sentinel.exists():
return
target_path.mkdir(parents=True, exist_ok=True)
gzip_path = self.get_zip_file_path(algorithm_name, version)
subprocess.check_call(["tar", "-xf", gzip_path], cwd=target_path)
sentinel.write_text("Done")
def get_unpacked_path(self, algorithm_name, version):
return self.unpack_path / algorithm_name / version
def get_zip_file_path(self, algorithm_name, version):
return self.zip_path / (algorithm_name + "__" + version + ".tar.gz")
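# Minimal subclass sketch (all names below are hypothetical, for illustration
# only; a real algorithm must also implement fetch_version for downloads):
#
#   class MyAligner(ExternalAlgorithm):
#       @property
#       def name(self):
#           return "my_aligner"
#       def build_cmd(self, output_directory, ncores, arguments):
#           return ["my_aligner", "--threads", ncores, "--out", output_directory]
#
#   store = ExternalAlgorithmStore("/data/zips", "/data/unpacked")
#   change_global_store(store)
#   MyAligner(version="_last_used").run("results/my_aligner_run")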
|
the-stack_106_13918
|
from __future__ import annotations
from neo3 import contracts, storage, vm
from neo3.network import payloads
from neo3.core import types, cryptography, IInteroperable, serialization, to_script_hash
from typing import Any, Dict, cast, List, Tuple, Type, Optional, Callable, Union
import enum
from dataclasses import dataclass
from contextlib import suppress
class ApplicationEngine(vm.ApplicationEngineCpp):
#: Amount of free GAS added to the engine.
GAS_FREE = 0
#: Maximum length of event names for "System.Runtime.Notify" SYSCALLs.
MAX_EVENT_SIZE = 32
#: Maximum message length for "System.Runtime.Log" SYSCALLs.
MAX_NOTIFICATION_SIZE = 1024
#: Maximum size of the smart contract script.
MAX_CONTRACT_LENGTH = 1024 * 1024
def __init__(self,
trigger: contracts.TriggerType,
container: payloads.IVerifiable,
snapshot: storage.Snapshot,
gas: int,
test_mode: bool = False
):
# Do not use super() version, see
# https://pybind11.readthedocs.io/en/master/advanced/classes.html#overriding-virtual-functions-in-python
vm.ApplicationEngineCpp.__init__(self, test_mode)
#: A ledger snapshot to use for syscalls such as "System.Blockchain.GetHeight".
self.snapshot = snapshot
#: The trigger to run the engine with.
self.trigger = trigger
#: A flag to toggle infinite gas
self.is_test_mode = test_mode
self.script_container = container
#: Gas available for consumption by the engine while executing its script.
self.gas_amount = self.GAS_FREE + gas
self._invocation_counter: Dict[types.UInt160, int] = {}
#: Notifications (Notify SYSCALLs) that occurred while executing the script.
self.notifications: List[Tuple[payloads.IVerifiable, types.UInt160, bytes, vm.ArrayStackItem]] = []
if self.snapshot is None or self.snapshot.persisting_block is None or self.snapshot.persisting_block.index == 0:
self.exec_fee_factor = contracts.PolicyContract().DEFAULT_EXEC_FEE_FACTOR
self.STORAGE_PRICE = contracts.PolicyContract().DEFAULT_STORAGE_PRICE
else:
self.exec_fee_factor = contracts.PolicyContract().get_exec_fee_factor(snapshot)
self.STORAGE_PRICE = contracts.PolicyContract().get_storage_price(snapshot)
self._context_state: Dict[vm.ExecutionContext, contracts.ContractState] = {}
from neo3.contracts import interop
self.interop = interop
def checkwitness(self, hash_: types.UInt160) -> bool:
"""
Check if the hash is a valid witness for the engine's script_container
"""
with suppress(ValueError):
if hash_ == self.calling_scripthash:
return True
if isinstance(self.script_container, payloads.Transaction):
tx = self.script_container
response = tx.try_get_attribute(payloads.OracleResponse)
if response is None:
signers = tx.signers
else:
signers = []
request = contracts.OracleContract().get_request(self.snapshot, response.id)
if request:
tmp_tx = contracts.LedgerContract().get_tx_for_contract(self.snapshot, request.original_tx_id)
if tmp_tx:
signers = tmp_tx.signers
for s in signers:
if s.account == hash_:
signer = s
break
else:
return False
if signer.scope == payloads.WitnessScope.GLOBAL:
return True
if payloads.WitnessScope.CALLED_BY_ENTRY in signer.scope:
if self.calling_scripthash == self.entry_scripthash:
return True
if payloads.WitnessScope.CUSTOM_CONTRACTS in signer.scope:
if self.current_scripthash in signer.allowed_contracts:
return True
if payloads.WitnessScope.CUSTOM_GROUPS in signer.scope:
if contracts.CallFlags.READ_STATES not in \
contracts.CallFlags(self.current_context.call_flags):
raise ValueError("Context requires callflags ALLOW_STATES")
contract = contracts.ManagementContract().get_contract(self.snapshot, self.calling_scripthash)
if contract is None:
return False
group_keys = set(map(lambda g: g.public_key, contract.manifest.groups))
if any(group_keys.intersection(signer.allowed_groups)):
return True
return False
if contracts.CallFlags.READ_STATES not in \
contracts.CallFlags(self.current_context.call_flags):
raise ValueError("Context requires callflags ALLOW_STATES")
# for other IVerifiable types like Block
hashes_for_verifying = self.script_container.get_script_hashes_for_verifying(self.snapshot)
return hash_ in hashes_for_verifying
def _stackitem_to_native(self, stack_item: vm.StackItem, target_type: Type[object]):
# checks for type annotations like `List[bytes]` (similar to byte[][] in C#)
if hasattr(target_type, '__origin__') and target_type.__origin__ == list: # type: ignore
element_type = target_type.__args__[0] # type: ignore
array = []
if isinstance(stack_item, vm.ArrayStackItem):
for e in stack_item:
array.append(self._convert(e, element_type))
else:
count = stack_item.to_biginteger()
if count > self.MAX_STACK_SIZE:
raise ValueError
# mypy bug: https://github.com/python/mypy/issues/9755
for e in range(count): # type: ignore
array.append(self._convert(self.pop(), element_type))
return array
else:
try:
return self._convert(stack_item, target_type)
except ValueError:
if isinstance(stack_item, vm.InteropStackItem):
return stack_item.get_object()
else:
raise
def _convert(self, stack_item: vm.StackItem, class_type: Type[object]) -> object:
"""
convert VM type to native
"""
if class_type in [vm.StackItem, vm.PointerStackItem, vm.ArrayStackItem, vm.InteropStackItem]:
return stack_item
elif class_type == int:
return int(stack_item.to_biginteger())
elif class_type == vm.BigInteger:
return stack_item.to_biginteger()
# mypy bug? https://github.com/python/mypy/issues/9756
elif class_type in [bytes, bytearray]: # type: ignore
return stack_item.to_array()
elif class_type == bool:
return stack_item.to_boolean()
elif class_type == types.UInt160:
return types.UInt160(data=stack_item.to_array())
elif class_type == types.UInt256:
return types.UInt256(data=stack_item.to_array())
elif class_type == str:
if stack_item == vm.NullStackItem():
return ""
return stack_item.to_array().decode()
elif class_type == cryptography.ECPoint:
return cryptography.ECPoint.deserialize_from_bytes(stack_item.to_array())
elif issubclass(class_type, enum.Enum):
if stack_item.get_type() == vm.StackItemType.INTEGER:
stack_item = cast(vm.IntegerStackItem, stack_item)
# mypy seems to have trouble understanding types that support __int__
return class_type(int(stack_item)) # type: ignore
elif stack_item.get_type() == vm.StackItemType.BYTESTRING:
stack_item = cast(vm.ByteStringStackItem, stack_item)
return class_type(int(stack_item.to_biginteger())) # type: ignore
raise ValueError(f"Unknown class type, don't know how to convert: {class_type}")
def _native_to_stackitem(self, value, native_type) -> vm.StackItem:
"""
Convert native type to VM type
Note: order of checking matters.
e.g. a Transaction should be treated as IInteroperable, while it's also ISerializable
"""
if isinstance(value, vm.StackItem):
return value
elif value is None:
return vm.NullStackItem()
elif native_type in [int, vm.BigInteger]:
return vm.IntegerStackItem(value)
elif issubclass(native_type, IInteroperable):
value_ = cast(IInteroperable, value)
return value_.to_stack_item(self.reference_counter)
elif issubclass(native_type, serialization.ISerializable):
serializable_value = cast(serialization.ISerializable, value)
return vm.ByteStringStackItem(serializable_value.to_array())
# mypy bug? https://github.com/python/mypy/issues/9756
elif native_type in [bytes, bytearray]: # type: ignore
return vm.ByteStringStackItem(value)
elif native_type == str:
return vm.ByteStringStackItem(bytes(value, 'utf-8'))
elif native_type == bool:
return vm.BooleanStackItem(value)
elif issubclass(native_type, (enum.IntFlag, enum.IntEnum)):
return self._native_to_stackitem(value.value, int)
elif hasattr(native_type, '__origin__') and native_type.__origin__ == Union: # type: ignore
# handle typing.Optional[type], Optional is an alias for Union[x, None]
# only support specifying 1 type
if len(native_type.__args__) != 2:
raise ValueError(f"Don't know how to convert native type {native_type} to stackitem")
for i in native_type.__args__:
if i is None:
continue
return self._native_to_stackitem(value, native_type)
else:
raise ValueError # shouldn't be possible, but silences mypy
else:
return vm.StackItem.from_interface(value)
def on_syscall(self, method_id: int) -> Any:
"""
Handle interop syscalls.
Args:
method_id: unique syscall identifier.
Raise:
KeyError: if `method_id` is syscall that is not registered with the engine.
ValueError: if the requested syscall handler is called with the wrong call flags.
ValueError: if engine stack parameter to native type conversion fails
Returns:
The result of the syscall handler
"""
descriptor = self.interop.InteropService.get_descriptor(method_id)
if descriptor is None:
raise KeyError(f"Requested interop {method_id} is not valid")
if descriptor.required_call_flags not in contracts.CallFlags(self.current_context.call_flags):
raise ValueError(f"Cannot call {descriptor.method} with {self.current_context.call_flags}")
self.add_gas(descriptor.price * self.exec_fee_factor)
parameters = []
for target_type in descriptor.parameters:
try:
item = self.pop()
parameters.append(self._stackitem_to_native(item, target_type))
except IndexError:
raise ValueError("Failed to pop parameter from stack")
except Exception:
raise ValueError(f"Failed to convert parameter stack item '{item}' to type '{target_type}'")
if len(parameters) > 0:
return_value = descriptor.handler(self, *parameters)
else:
return_value = descriptor.handler(self)
if descriptor.has_return_value:
self.push(self._native_to_stackitem(return_value, type(return_value)))
return return_value
def invoke_syscall_by_name(self, method: str) -> Any:
"""
Helper function to call `on_syscall` using the syscall name.
Args:
method: full qualified syscall name. e.g. "System.Runtime.Platform"
Returns: the result of the syscall handler. e.g. for "System.Runtime.Platform" returns "NEO"
"""
return self.on_syscall(contracts.syscall_name_to_int(method))
@property
def current_scripthash(self) -> types.UInt160:
"""
Get the script hash of the current executing smart contract
Note: a smart contract can call other smart contracts.
"""
if len(self.current_context.scripthash_bytes) == 0:
return to_script_hash(self.current_context.script._value)
return types.UInt160(self.current_context.scripthash_bytes)
@property
def calling_scripthash(self) -> types.UInt160:
"""
Get the script hash of the smart contract that called the current executing smart contract.
Note: a smart contract can call other smart contracts.
Raises:
ValueError: if the current executing contract has not been called by another contract.
"""
if len(self.current_context.calling_scripthash_bytes) == 0:
raise ValueError("Cannot retrieve calling script_hash - current context has not yet been called")
return types.UInt160(self.current_context.calling_scripthash_bytes)
@property
def entry_scripthash(self) -> types.UInt160:
"""
Get the script hash of the first smart contract loaded into the engine
Note: a smart contract can call other smart contracts.
"""
if len(self.entry_context.scripthash_bytes) == 0:
return to_script_hash(self.entry_context.script._value)
return types.UInt160(self.entry_context.scripthash_bytes)
def get_invocation_counter(self) -> int:
"""
Get the number of times the current contract has been called during this execute() run.
Note: the counter increases with every "System.Contract.Call" SYSCALL
Raises:
ValueError: if the contract has not been called.
"""
counter = self._invocation_counter.get(self.current_scripthash, None)
if counter is None:
self._invocation_counter.update({self.current_scripthash: 1})
counter = 1
return counter
def load_script_with_callflags(self,
script: vm.Script,
call_flags: contracts.CallFlags,
initial_position: int = 0,
rvcount: int = -1,
contract_state: Optional[contracts.ContractState] = None):
context = super(ApplicationEngine, self).load_script(script, rvcount, initial_position)
context.call_flags = int(call_flags)
if contract_state is not None:
self._context_state.update({context: contract_state})
return context
def call_from_native(self,
calling_scripthash: types.UInt160,
hash_: types.UInt160,
method: str,
args: List[vm.StackItem]) -> None:
ctx = self.current_context
self._contract_call_internal(hash_, method, contracts.CallFlags.ALL, False, args)
self.current_context.calling_scripthash_bytes = calling_scripthash.to_array()
while self.current_context != ctx:
self.step_out()
def step_out(self) -> None:
c = len(self.invocation_stack)
while self.state != vm.VMState.HALT and self.state != vm.VMState.FAULT and len(self.invocation_stack) >= c:
self._execute_next()
if self.state == vm.VMState.FAULT:
raise ValueError(f"Call from native contract failed: {self.exception_message}")
def load_contract(self,
contract: contracts.ContractState,
method_descriptor: contracts.ContractMethodDescriptor,
flags: contracts.CallFlags) -> Optional[vm.ExecutionContext]:
rvcount = 0 if method_descriptor.return_type == contracts.ContractParameterType.VOID else 1
context = self.load_script_with_callflags(vm.Script(contract.script),
flags,
method_descriptor.offset,
rvcount,
contract)
# configure state
context.call_flags = int(flags)
context.scripthash_bytes = contract.hash.to_array()
init = contract.manifest.abi.get_method("_initialize", 0)
if init is not None:
self.load_context(context.clone(init.offset))
return context
def load_token(self, token_id: int) -> vm.ExecutionContext:
contract = self._context_state.get(self.current_context, None)
if contract is None:
raise ValueError("Current context has no contract state")
if token_id >= len(contract.nef.tokens):
raise ValueError("token_id exceeds available tokens")
token = contract.nef.tokens[token_id]
if token.parameters_count > len(self.current_context.evaluation_stack):
raise ValueError("Token count exceeds available paremeters on evaluation stack")
args: List[vm.StackItem] = []
for _ in range(token.parameters_count):
args.append(self.pop())
return self._contract_call_internal(token.hash, token.method, token.call_flags, token.has_return_value, args)
def call_native(self, name: str) -> None:
contract = contracts.ManagementContract().get_contract_by_name(name)
if contract is None or contract.active_block_index > self.snapshot.persisting_block.index:
raise ValueError
contract.invoke(self)
def context_unloaded(self, context: vm.ExecutionContext) -> None:
self._context_state.pop(context, None)
def _contract_call_internal(self,
contract_hash: types.UInt160,
method: str,
flags: contracts.CallFlags,
has_return_value: bool,
args: List[vm.StackItem]) -> vm.ExecutionContext:
target_contract = contracts.ManagementContract().get_contract(self.snapshot, contract_hash)
if target_contract is None:
raise ValueError("[System.Contract.Call] Can't find target contract")
method_descriptor = target_contract.manifest.abi.get_method(method, len(args))
if method_descriptor is None:
raise ValueError(f"[System.Contract.Call] Method '{method}' with {len(args)} arguments does not exist on "
f"target contract")
return self._contract_call_internal2(target_contract, method_descriptor, flags, has_return_value, args)
def load_context(self, context: vm.ExecutionContext) -> None:
if len(context.scripthash_bytes) == 0:
context.scripthash_bytes = to_script_hash(context.script._value).to_array()
contract_hash = types.UInt160(data=context.scripthash_bytes)
counter = self._invocation_counter.get(contract_hash, 0)
self._invocation_counter.update({contract_hash: counter + 1})
super(ApplicationEngine, self).load_context(context)
def _contract_call_internal2(self,
target_contract: contracts.ContractState,
method_descriptor: contracts.ContractMethodDescriptor,
flags: contracts.CallFlags,
has_return_value: bool,
args: List[vm.StackItem]):
if method_descriptor.safe:
flags &= ~contracts.CallFlags.WRITE_STATES
else:
current_contract = contracts.ManagementContract().get_contract(self.snapshot, self.current_scripthash)
if current_contract and not current_contract.can_call(target_contract, method_descriptor.name):
raise ValueError(
f"[System.Contract.Call] Not allowed to call target method '{method_descriptor.name}' according "
f"to manifest")
counter = self._invocation_counter.get(target_contract.hash, 0)
self._invocation_counter.update({target_contract.hash: counter + 1})
state = self.current_context
calling_script_hash_bytes = state.scripthash_bytes
calling_flags = state.call_flags
arg_len = len(args)
expected_len = len(method_descriptor.parameters)
if arg_len != expected_len:
raise ValueError(
f"[System.Contract.Call] Invalid number of contract arguments. Expected {expected_len} actual {arg_len}") # noqa
if has_return_value ^ (method_descriptor.return_type != contracts.ContractParameterType.VOID):
raise ValueError("Return value type does not match")
context_new = self.load_contract(target_contract,
method_descriptor,
flags & calling_flags)
if context_new is None:
raise ValueError
context_new.calling_scripthash_bytes = calling_script_hash_bytes
for item in reversed(args):
context_new.evaluation_stack.push(item)
if contracts.NativeContract.is_native(target_contract.hash):
context_new.evaluation_stack.push(vm.ByteStringStackItem(method_descriptor.name.encode('utf-8')))
return context_new
|
the-stack_106_13919
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class RolesV3TestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def resource_setup(cls):
super(RolesV3TestJSON, cls).resource_setup()
cls.roles = list()
for _ in range(3):
role_name = data_utils.rand_name(name='role')
role = cls.roles_client.create_role(name=role_name)['role']
cls.roles.append(role)
cls.fetched_role_ids = list()
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
u_email = '%[email protected]' % u_name
cls.u_password = data_utils.rand_password()
cls.domain = cls.domains_client.create_domain(
name=data_utils.rand_name('domain'),
description=data_utils.rand_name('domain-desc'))['domain']
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'),
domain_id=cls.domain['id'])['project']
cls.group_body = cls.groups_client.create_group(
name=data_utils.rand_name('Group'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=cls.u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
cls.role = cls.roles_client.create_role(
name=data_utils.rand_name('Role'))['role']
@classmethod
def resource_cleanup(cls):
cls.roles_client.delete_role(cls.role['id'])
cls.groups_client.delete_group(cls.group_body['id'])
cls.users_client.delete_user(cls.user_body['id'])
cls.projects_client.delete_project(cls.project['id'])
# NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting, or else it would result in an unauthorized error
cls.domains_client.update_domain(cls.domain['id'], enabled=False)
cls.domains_client.delete_domain(cls.domain['id'])
for role in cls.roles:
cls.roles_client.delete_role(role['id'])
super(RolesV3TestJSON, cls).resource_cleanup()
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
@test.attr(type='smoke')
@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
def test_role_create_update_show_list(self):
r_name = data_utils.rand_name('Role')
role = self.roles_client.create_role(name=r_name)['role']
self.addCleanup(self.roles_client.delete_role, role['id'])
self.assertIn('name', role)
self.assertEqual(role['name'], r_name)
new_name = data_utils.rand_name('NewRole')
updated_role = self.roles_client.update_role(role['id'],
name=new_name)['role']
self.assertIn('name', updated_role)
self.assertIn('id', updated_role)
self.assertIn('links', updated_role)
self.assertNotEqual(r_name, updated_role['name'])
new_role = self.roles_client.show_role(role['id'])['role']
self.assertEqual(new_name, new_role['name'])
self.assertEqual(updated_role['id'], new_role['id'])
roles = self.roles_client.list_roles()['roles']
self.assertIn(role['id'], [r['id'] for r in roles])
@decorators.idempotent_id('c6b80012-fe4a-498b-9ce8-eb391c05169f')
def test_grant_list_revoke_role_to_user_on_project(self):
self.roles_client.create_user_role_on_project(self.project['id'],
self.user_body['id'],
self.role['id'])
roles = self.roles_client.list_user_roles_on_project(
self.project['id'], self.user_body['id'])['roles']
for i in roles:
self.fetched_role_ids.append(i['id'])
self._list_assertions(roles, self.fetched_role_ids,
self.role['id'])
self.roles_client.check_user_role_existence_on_project(
self.project['id'], self.user_body['id'], self.role['id'])
self.roles_client.delete_role_from_user_on_project(
self.project['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('6c9a2940-3625-43a3-ac02-5dcec62ef3bd')
def test_grant_list_revoke_role_to_user_on_domain(self):
self.roles_client.create_user_role_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
roles = self.roles_client.list_user_roles_on_domain(
self.domain['id'], self.user_body['id'])['roles']
for i in roles:
self.fetched_role_ids.append(i['id'])
self._list_assertions(roles, self.fetched_role_ids,
self.role['id'])
self.roles_client.check_user_role_existence_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
self.roles_client.delete_role_from_user_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
@decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
def test_grant_list_revoke_role_to_group_on_project(self):
# Grant role to group on project
self.roles_client.create_group_role_on_project(
self.project['id'], self.group_body['id'], self.role['id'])
# List group roles on project
roles = self.roles_client.list_group_roles_on_project(
self.project['id'], self.group_body['id'])['roles']
for i in roles:
self.fetched_role_ids.append(i['id'])
self._list_assertions(roles, self.fetched_role_ids,
self.role['id'])
# Add user to group, and ensure the user has the role on the project
self.groups_client.add_group_user(self.group_body['id'],
self.user_body['id'])
self.addCleanup(self.groups_client.delete_group_user,
self.group_body['id'], self.user_body['id'])
body = self.token.auth(user_id=self.user_body['id'],
password=self.u_password,
user_domain_name=self.domain['name'],
project_name=self.project['name'],
project_domain_name=self.domain['name'])
roles = body['token']['roles']
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0]['id'], self.role['id'])
self.roles_client.check_role_from_group_on_project_existence(
self.project['id'], self.group_body['id'], self.role['id'])
# Revoke role to group on project
self.roles_client.delete_role_from_group_on_project(
self.project['id'], self.group_body['id'], self.role['id'])
@decorators.idempotent_id('4bf8a70b-e785-413a-ad53-9f91ce02faa7')
def test_grant_list_revoke_role_to_group_on_domain(self):
self.roles_client.create_group_role_on_domain(
self.domain['id'], self.group_body['id'], self.role['id'])
roles = self.roles_client.list_group_roles_on_domain(
self.domain['id'], self.group_body['id'])['roles']
for i in roles:
self.fetched_role_ids.append(i['id'])
self._list_assertions(roles, self.fetched_role_ids,
self.role['id'])
self.roles_client.check_role_from_group_on_domain_existence(
self.domain['id'], self.group_body['id'], self.role['id'])
self.roles_client.delete_role_from_group_on_domain(
self.domain['id'], self.group_body['id'], self.role['id'])
@decorators.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
def test_list_roles(self):
# Return a list of all roles
body = self.roles_client.list_roles()['roles']
found = [role for role in body if role in self.roles]
self.assertEqual(len(found), len(self.roles))
def _create_implied_role(self, prior_role_id, implies_role_id,
ignore_not_found=False):
self.roles_client.create_role_inference_rule(
prior_role_id, implies_role_id)
if ignore_not_found:
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.roles_client.delete_role_inference_rule,
prior_role_id,
implies_role_id)
else:
self.addCleanup(
self.roles_client.delete_role_inference_rule,
prior_role_id,
implies_role_id)
@decorators.idempotent_id('c90c316c-d706-4728-bcba-eb1912081b69')
def test_implied_roles_create_delete(self):
prior_role_id = self.roles[0]['id']
implies_role_id = self.roles[1]['id']
# Create an inference rule from prior_role to implies_role
self._create_implied_role(prior_role_id, implies_role_id,
ignore_not_found=True)
# Check if the inference rule exists
self.roles_client.show_role_inference_rule(
prior_role_id, implies_role_id)
# Delete the inference rule
self.roles_client.delete_role_inference_rule(
prior_role_id, implies_role_id)
# Check if the inference rule no longer exists
self.assertRaises(
lib_exc.NotFound,
self.roles_client.show_role_inference_rule,
prior_role_id,
implies_role_id)
@decorators.idempotent_id('dc6f5959-b74d-4e30-a9e5-a8255494ff00')
def test_roles_hierarchy(self):
# Create inference rule from "roles[0]" to "role[1]"
self._create_implied_role(
self.roles[0]['id'], self.roles[1]['id'])
# Create inference rule from "roles[0]" to "role[2]"
self._create_implied_role(
self.roles[0]['id'], self.roles[2]['id'])
# Create inference rule from "roles[2]" to "role"
self._create_implied_role(
self.roles[2]['id'], self.role['id'])
# Listing inferences rules from "roles[2]" should only return "role"
rules = self.roles_client.list_role_inferences_rules(
self.roles[2]['id'])['role_inference']
self.assertEqual(1, len(rules['implies']))
self.assertEqual(self.role['id'], rules['implies'][0]['id'])
# Listing inferences rules from "roles[0]" should return "roles[1]" and
# "roles[2]" (only direct rules are listed)
rules = self.roles_client.list_role_inferences_rules(
self.roles[0]['id'])['role_inference']
implies_ids = [role['id'] for role in rules['implies']]
self.assertEqual(2, len(implies_ids))
self.assertIn(self.roles[1]['id'], implies_ids)
self.assertIn(self.roles[2]['id'], implies_ids)
@decorators.idempotent_id('c8828027-df48-4021-95df-b65b92c7429e')
def test_assignments_for_implied_roles_create_delete(self):
# Create a grant using "roles[0]"
self.roles_client.create_user_role_on_project(
self.project['id'], self.user_body['id'], self.roles[0]['id'])
self.addCleanup(
self.roles_client.delete_role_from_user_on_project,
self.project['id'], self.user_body['id'], self.roles[0]['id'])
# Create an inference rule from "roles[0]" to "roles[1]"
self._create_implied_role(self.roles[0]['id'], self.roles[1]['id'],
ignore_not_found=True)
# In the effective list of role assignments, both prior role and
# implied role should be present. This means that a user can
# authenticate using both roles (both roles will be present
# in the token).
params = {'scope.project.id': self.project['id'],
'user.id': self.user_body['id']}
role_assignments = self.role_assignments.list_role_assignments(
effective=True, **params)['role_assignments']
self.assertEqual(2, len(role_assignments))
roles_ids = [assignment['role']['id']
for assignment in role_assignments]
self.assertIn(self.roles[0]['id'], roles_ids)
self.assertIn(self.roles[1]['id'], roles_ids)
# After deleting the implied role, only the assignment with "roles[0]"
# should be present.
self.roles_client.delete_role_inference_rule(
self.roles[0]['id'], self.roles[1]['id'])
role_assignments = self.role_assignments.list_role_assignments(
effective=True, **params)['role_assignments']
self.assertEqual(1, len(role_assignments))
roles_ids = [assignment['role']['id']
for assignment in role_assignments]
self.assertIn(self.roles[0]['id'], roles_ids)
@decorators.idempotent_id('d92a41d2-5501-497a-84bb-6e294330e8f8')
def test_domain_roles_create_delete(self):
domain_role = self.roles_client.create_role(
name=data_utils.rand_name('domain_role'),
domain_id=self.domain['id'])['role']
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.roles_client.delete_role,
domain_role['id'])
domain_roles = self.roles_client.list_roles(
domain_id=self.domain['id'])['roles']
self.assertEqual(1, len(domain_roles))
self.assertIn(domain_role, domain_roles)
self.roles_client.delete_role(domain_role['id'])
domain_roles = self.roles_client.list_roles(
domain_id=self.domain['id'])['roles']
self.assertEmpty(domain_roles)
@decorators.idempotent_id('eb1e1c24-1bc4-4d47-9748-e127a1852c82')
def test_implied_domain_roles(self):
# Create two roles in the same domain
domain_role1 = self.setup_test_role(domain_id=self.domain['id'])
domain_role2 = self.setup_test_role(domain_id=self.domain['id'])
# Check if we can create an inference rule from roles in the same
# domain
self._create_implied_role(domain_role1['id'], domain_role2['id'])
# Create another role in a different domain
domain2 = self.setup_test_domain()
domain_role3 = self.setup_test_role(domain_id=domain2['id'])
# Check if we can create cross domain implied roles
self._create_implied_role(domain_role1['id'], domain_role3['id'])
# Finally, we also should be able to create an implied from a
# domain role to a global one
self._create_implied_role(domain_role1['id'], self.role['id'])
if CONF.identity_feature_enabled.forbid_global_implied_dsr:
# The contrary is not true: we can't create an inference rule
# from a global role to a domain role
self.assertRaises(
lib_exc.Forbidden,
self.roles_client.create_role_inference_rule,
self.role['id'],
domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
def test_assignments_for_domain_roles(self):
domain_role = self.setup_test_role(domain_id=self.domain['id'])
# Create a grant using "domain_role"
self.roles_client.create_user_role_on_project(
self.project['id'], self.user_body['id'], domain_role['id'])
self.addCleanup(
self.roles_client.delete_role_from_user_on_project,
self.project['id'], self.user_body['id'], domain_role['id'])
# NOTE(rodrigods): Regular roles would appear in the effective
# list of role assignments (meaning the role would be returned in
# a token) as a result from the grant above. This is not the case
# for domain roles, they should not appear in the effective role
# assignments list.
params = {'scope.project.id': self.project['id'],
'user.id': self.user_body['id']}
role_assignments = self.role_assignments.list_role_assignments(
effective=True, **params)['role_assignments']
self.assertEmpty(role_assignments)
@decorators.idempotent_id('3748c316-c18f-4b08-997b-c60567bc6235')
def test_list_all_implied_roles(self):
# Create inference rule from "roles[0]" to "roles[1]"
self._create_implied_role(
self.roles[0]['id'], self.roles[1]['id'])
# Create inference rule from "roles[0]" to "roles[2]"
self._create_implied_role(
self.roles[0]['id'], self.roles[2]['id'])
# Create inference rule from "roles[2]" to "role"
self._create_implied_role(
self.roles[2]['id'], self.role['id'])
rules = self.roles_client.list_all_role_inference_rules()[
'role_inferences']
# Sort the rules by the number of inferences, since there should be 1
# inference between "roles[2]" and "role" and 2 inferences for
# "roles[0]": between "roles[1]" and "roles[2]".
sorted_rules = sorted(rules, key=lambda r: len(r['implies']))
# Check that 2 sets of rules are returned.
self.assertEqual(2, len(sorted_rules))
# Check that only 1 inference rule exists between "roles[2]" and "role"
self.assertEqual(1, len(sorted_rules[0]['implies']))
# Check that 2 inference rules exist for "roles[0]": one to "roles[1]" and
# one to "roles[2]".
self.assertEqual(2, len(sorted_rules[1]['implies']))
# Check that "roles[2]" is the "prior_role" and that "role" is the
# "implies" role.
self.assertEqual(self.roles[2]['id'],
sorted_rules[0]['prior_role']['id'])
self.assertEqual(self.role['id'],
sorted_rules[0]['implies'][0]['id'])
# Check that "roles[0]" is the "prior_role" and that "roles[1]" and
# "roles[2]" are the "implies" roles.
self.assertEqual(self.roles[0]['id'],
sorted_rules[1]['prior_role']['id'])
implies_ids = [r['id'] for r in sorted_rules[1]['implies']]
self.assertIn(self.roles[1]['id'], implies_ids)
self.assertIn(self.roles[2]['id'], implies_ids)
|
the-stack_106_13925
|
#
# This file is a command-module for Dragonfly.
# (c) Copyright 2008 by Christo Butcher
# Licensed under the LGPL, see <http://www.gnu.org/licenses/>
#
"""
Command-module for **taskbar** and icon tray access
===================================================
This file implements commands for controlling tasks on the
taskbar and icons in the icon tray.
Commands
--------
Command: **"[open | switch to] task <number>"**
Open the specified task on the taskbar.
The *<number>* extra is a number (1, 2, ...) designating which
task to activate. The application on the taskbar which is
closest to the "Start" button is task *number 1*. The next
is *number 2*, and so on. This works for both horizontal and
vertical taskbars.
Command: **"<action> task <number>"**
Similar to the command above, but performs some action
under specified task. The following actions are available:
- **"(menu | pop up)"** -- show the pop-up menu of the task,
as if a right-click was done on the taskbar.
- **"(maximize | max)"** -- maximize the task.
- **"(minimize | min)"** -- minimize the task.
- **"restore"** -- restore the task.
- **"close"** -- close the task.
Command: **"[open] icon <number>"**
Open the specified icon in the icon tray.
The *<number>* extra is a number (1, 2, ...) designating which
icon to activate. The numbering is similar to that explained
above for tasks on the taskbar.
Command: **"(menu | pop up) icon <number>"**
Pop-up the menu for the specified icon in the icon tray.
Usage examples
--------------
Several concrete usage examples of the commands described above:
- Say **"task 4"** to bring the fourth application on the taskbar
to the foreground.
- Say **"close task 2"** to close the second application on
the taskbar.
- Say **"icon 1"** to activate the first icon visible in the
icon tray, as if it was double-clicked.
- Say **"pop up icon 3"** to bring up the menu of the third
icon visible in the icon tray, as if it was right-clicked.
"""
import pkg_resources
pkg_resources.require("dragonfly >= 0.6.5beta1.dev-r76")
from dragonfly import *
#---------------------------------------------------------------------------
# This rule controls tasks on the taskbar.
class TaskRule(MappingRule):
mapping = {
"[open | switch to] task <n>": Key("space"),
"(menu | pop up) task <n>": Key("apps"),
"close task <n>": Key("apps/10, c"),
"restore task <n>": Key("apps/10, r"),
"(minimize | min) task <n>": Key("apps/10, n"),
"(maximize | max) task <n>": Key("apps/10, x"),
}
extras = [IntegerRef("n", 1, 12)]
def _process_recognition(self, value, extras):
count = extras["n"] - 1
action = Key("w-b/10, s-tab/10, right:%d/10" % count) + value
action.execute()
#---------------------------------------------------------------------------
# This rule controls icons in the icon tray.
class IconRule(MappingRule):
mapping = {
"[open] icon <n>": Key("enter"),
"(menu | pop up) icon <n>": Key("apps"),
}
extras = [IntegerRef("n", 1, 12)]
def _process_recognition(self, value, extras):
count = extras["n"] - 1
action = Key("w-b/10, right:%d/10" % count) + value
action.execute()
#---------------------------------------------------------------------------
# Load the grammar instance and define how to unload it.
grammar = Grammar("taskbar")
grammar.add_rule(TaskRule())
grammar.add_rule(IconRule())
grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
|
the-stack_106_13927
|
import re
import logging
import binascii
import struct
from . import register_backend, Backend
from ..errors import CLEError
l = logging.getLogger(name=__name__)
__all__ = ('Hex',)
intel_hex_re = re.compile(b":([0-9a-fA-F][0-9a-fA-F])([0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F])"
b"([0-9a-fA-F][0-9a-fA-F])([0-9a-fA-F][0-9a-fA-F]+)*([0-9a-fA-F][0-9a-fA-F])")
HEX_TYPE_DATA = 0x00
HEX_TYPE_EOF = 0x01
HEX_TYPE_EXTSEGADDR = 0x02
HEX_TYPE_STARTSEGADDR = 0x03
HEX_TYPE_EXTLINEARADDR = 0x04
HEX_TYPE_STARTLINEARADDR = 0x05
if bytes is not str:
chh = lambda x: x
else:
chh = ord
class Hex(Backend):
"""
A loader for Intel Hex Objects
See https://en.wikipedia.org/wiki/Intel_HEX
"""
is_default = True # Tell CLE to automatically consider using the Hex backend
@staticmethod
def parse_record(line):
m = intel_hex_re.match(line)
if not m:
raise CLEError("Invalid HEX record: " + line)
my_cksum = 0
count, addr, rectype, data, cksum = m.groups()
cksum = int(cksum, 16)
for d in binascii.unhexlify(line[1:-2]):
my_cksum = (my_cksum + chh(d)) % 256
my_cksum = ((my_cksum ^ 0xff) + 1) % 256
if my_cksum != cksum:
raise CLEError("Invalid checksum: Computed %s, found %s" % (hex(my_cksum), hex(cksum)))
count = int(count, 16)
addr = int(addr, 16)
rectype = int(rectype, 16)
if data:
data = binascii.unhexlify(data)
if data and count != len(data):
raise CLEError("Data length field does not match length of actual data: " + line)
return rectype, addr, data
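# Example with Wikipedia's "address gap" data record (checksum 0xA7 verifies):
#
#   Hex.parse_record(b":0B0010006164647265737320676170A7")
#   -> (HEX_TYPE_DATA, 0x0010, b"address gap")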
@staticmethod
def coalesce_regions(regions):
# Lots of tiny memory regions is bad!
# The greedy algorithm to smash them together:
result = []
for addr, region in sorted(regions):
if result and result[-1][0] + len(result[-1][1]) == addr:
result[-1] = (result[-1][0], result[-1][1] + region)
else:
result.append((addr, region))
return result
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.arch is None:
raise CLEError("To use the Hex binary backend, you need to specify an architecture in the loader options.")
# Do the whole thing in one shot.
self.os = 'unknown'
got_base = False
got_entry = False
self._binary_stream.seek(0)
string = self._binary_stream.read()
recs = string.splitlines()
regions = []
max_addr = 0
min_addr = 0xffffffffffffffff
self._base_address = 0
for rec in recs:
rectype, addr, data = Hex.parse_record(rec)
if rectype == HEX_TYPE_DATA:
addr += self._base_address
#l.debug("Loading %d bytes at " % len(data) + hex(addr))
# Raw data. Put the bytes
regions.append((addr, data))
# We have to be careful about the min and max addrs
if addr < min_addr:
min_addr = addr
max_addr = max(max_addr, addr + len(data) - 1)
elif rectype == HEX_TYPE_EOF:
# EOF
l.debug("Got EOF record.")
break
elif rectype == HEX_TYPE_EXTSEGADDR:
# "Extended Mode" Segment address, take this value, multiply by 16, make the base
self._base_address = struct.unpack('>H', data)[0] * 16
got_base = True
l.debug("Loading a segment at %#x", self._base_address)
elif rectype == HEX_TYPE_STARTSEGADDR:
# Four bytes, the segment and the initial IP
got_base = True
got_entry = True
self._initial_cs, self._initial_ip = struct.unpack('>HH', data)
# The whole thing is the entry, as far as angr is concerned.
self._entry = struct.unpack('>I', data)[0]
l.debug("Got entry point at %#x", self._entry)
elif rectype == HEX_TYPE_EXTLINEARADDR:
got_base = True
# Specifies the base for all future data bytes.
self._base_address = struct.unpack('>H', data)[0] << 16
l.debug("Loading a segment at %#x", self._base_address)
elif rectype == HEX_TYPE_STARTLINEARADDR:
got_entry = True
# The 32-bit EIP, really the same as STARTSEGADDR, but some compilers pick one over the other.
self._entry = struct.unpack('>I', data)[0]
l.debug("Found entry point at %#x", self._entry)
self._initial_eip = self._entry
else:
raise CLEError("This HEX Object type is not implemented: " + hex(rectype))
if not got_base:
l.warning("No base address was found in this HEX object file. It is assumed to be 0")
if not got_entry:
l.warning("No entry point was found in this HEX object file, and it is assumed to be 0. "
"Specify one with `entry_point` to override.")
# HEX specifies a ton of tiny little memory regions. We now smash them together to make things faster.
new_regions = Hex.coalesce_regions(regions)
for addr, data in new_regions:
self.memory.add_backer(addr, data)
self._max_addr = max_addr
self._min_addr = min_addr
@staticmethod
def is_compatible(stream):
stream.seek(0)
s = stream.read(0x10)
stream.seek(0)
return s.startswith(b":")
register_backend("hex", Hex)
|
the-stack_106_13929
|
from typing import Dict, List, Optional, Any, Union
from pydantic import BaseModel, validator
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Documentation, PortDoc
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from tracardi.domain.event import Event
from tracardi.domain.profile import Profile
from tracardi.domain.session import Session
class Configuration(BaseModel):
append: Optional[Dict[str, Any]] = {}
remove: Optional[Dict[str, Union[Any, List[Any]]]] = {}
@validator("remove")
def validate_remove(cls, value, values):
if 'append' not in values and 'remove' not in values:
raise ValueError("Please define `append` or `remove` in config section.")
return value
def validate(config: dict):
return Configuration(**config)
class AppendTraitAction(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload: dict):
dot = self._get_dot_accessor(payload if isinstance(payload, dict) else None)
for destination, value in self.config.append.items():
value = dot[value]
if destination in dot:
if not isinstance(dot[destination], list):
# Make it a list with original value
dot[destination] = [dot[destination]]
if value not in dot[destination]:
dot[destination].append(value)
else:
dot[destination] = value
for destination, value in self.config.remove.items():
value = dot[value]
if destination in dot:
if not isinstance(dot[destination], list):
raise ValueError("Can not remove from non-list data.")
if isinstance(value, list):
for v in value:
if v in dot[destination]:
dot[destination].remove(v)
elif value in dot[destination]:
dot[destination].remove(value)
if self.event.metadata.profile_less is False:
if not isinstance(dot.profile['traits']['private'], dict):
raise ValueError("Error when appending [email protected] to value `{}`. "
"Private must have key:value pair. "
"E.g. `name`: `{}`".format(dot.profile['traits']['private'],
dot.profile['traits']['private']))
if not isinstance(dot.profile['traits']['public'], dict):
raise ValueError(
"Error when appending [email protected] to value `{}`. Public must have key:value pair. "
"E.g. `name`: `{}`".format(dot.profile['traits']['public'], dot.profile['traits']['public']))
profile = Profile(**dot.profile)
self.profile.replace(profile)
else:
if dot.profile:
self.console.warning("Profile changes were discarded in node `Append/Remove Trait`. "
"This event is profile less so there is no profile.")
event = Event(**dot.event)
self.event.replace(event)
if 'id' in dot.session:
session = Session(**dot.session)
self.session.replace(session)
return Result(port="payload", value=payload)
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module='tracardi.process_engine.action.v1.traits.append_trait_action',
className='AppendTraitAction',
inputs=['payload'],
outputs=["payload"],
init={
"append": {
"target1": "source1",
"target2": "source2",
},
"remove": {
"target": ["item1", "item2"]
}
},
version='0.1',
license="MIT",
author="Risto Kowaczewski"
),
metadata=MetaData(
name='Append/Remove Trait',
desc='Appends/Removes trait to/from existing profile trait.',
icon='append',
group=["Data processing"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="This port takes any JSON-like object.")
},
outputs={
"payload": PortDoc(desc="This port returns given payload with traits appended or removed according"
" to configuration.")
}
)
)
)
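# --- Configuration sketch (illustrative; the dotted paths below are hypothetical
# examples of the dot-notation this plugin resolves through its dot accessor) ---
# {
#     "append": {"profile@traits.public.interests": "event@properties.interest"},
#     "remove": {"profile@traits.public.tags": ["obsolete-tag"]}
# }
# With this config the action copies the event property into the profile's
# `interests` value (turning it into a list and appending when it already exists)
# and removes "obsolete-tag" from the `tags` list when present.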
|
the-stack_106_13931
|
from setuptools import setup, find_packages
from ftp_v5.version import version
def requirements():
with open('requirements', 'rt') as fin:
requirements = [line.strip() for line in fin]
return requirements
print(find_packages())
setup(
name='cos-ftp-server-v5',
version=version,
author='COS team',
author_email='[email protected]',
maintainer='iainyu',
maintainer_email='[email protected]',
url='https://cloud.tencent.com/product/cos',
license='MIT',
    description='The FTP gateway for the COS service, supporting multiple buckets.',
packages=find_packages(),
install_requires=requirements(),
include_package_data=True
)
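# --- Build/install sketch (standard setuptools workflow, not specific to this
# package) ---
#   pip install .            # install the gateway into the current environment
#   python setup.py sdist    # build a source distribution for release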
|
the-stack_106_13932
|
import json
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from reddit.models import Submission, Comment, Vote
from users.models import RedditUser
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from django.http import HttpResponseNotAllowed, HttpResponseBadRequest
class TestViewingThreadComments(TestCase):
def setUp(self):
self.c = Client()
self.credentials = {'username': 'username',
'password': 'password'}
author = RedditUser.objects.create(
user=User.objects.create_user(**self.credentials)
)
submission = Submission.objects.create(
id=1,
score=1,
title=get_random_string(length=12),
author=author
)
for _ in range(3):
Comment.objects.create(
author_name=author.user.username,
author=author,
submission=submission,
html_comment="root comment"
)
# Add some replies
parent = Comment.objects.get(id=1)
for _ in range(2):
Comment.objects.create(
author_name=author.user.username,
author=author,
submission=submission,
parent=parent,
html_comment="reply comment"
)
# add upvote to one root comment,
Vote.create(
user=author,
vote_object=Comment.objects.get(id=1),
vote_value=1
).save()
# and downvote to one reply comment
Vote.create(
user=author,
vote_object=Comment.objects.get(id=5),
vote_value=-1
).save()
# add upvote to the submission
Vote.create(
user=author,
vote_object=submission,
vote_value=1
).save()
def test_valid_public_comment_view(self):
self.c.logout()
r = self.c.get(reverse('thread', args=(1,)))
submission = Submission.objects.get(id=1)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.context['submission'], submission)
self.assertEqual(len(r.context['comments']), 5)
self.assertContains(r, 'root comment', count=3)
self.assertContains(r, 'reply comment', count=2)
self.assertEqual(r.context['comment_votes'], {})
self.assertIsNone(r.context['sub_vote'])
def test_comment_votes(self):
self.c.login(**self.credentials)
r = self.c.get(reverse('thread', args=(1,)))
self.assertEqual(r.status_code, 200)
self.assertEqual(r.context['sub_vote'], 1)
self.assertEqual(r.context['comment_votes'], {1: 1, 5: -1})
self.assertContains(r, 'root comment', count=3)
self.assertContains(r, 'reply comment', count=2)
def test_invalid_thread_id(self):
r = self.c.get(reverse('thread', args=(123,)))
self.assertEqual(r.status_code, 404)
class TestPostingComment(TestCase):
def setUp(self):
self.c = Client()
self.credentials = {'username': 'commentposttest',
'password': 'password'}
author = RedditUser.objects.create(
user=User.objects.create_user(**self.credentials)
)
Submission.objects.create(
id=99,
score=1,
title=get_random_string(length=12),
author=author
)
def test_post_only(self):
r = self.c.get(reverse('post_comment'))
self.assertIsInstance(r, HttpResponseNotAllowed)
def test_logged_out(self):
r = self.c.post(reverse('post_comment'))
self.assertEqual(r.status_code, 200)
json_response = json.loads(r.content.decode("utf-8"))
self.assertEqual(json_response['msg'], "You need to log in to post new comments.")
def test_missing_type_or_id(self):
self.c.login(**self.credentials)
for key in ['parentType', 'parentId']:
r = self.c.post(reverse('post_comment'),
data={key: 'comment'})
self.assertIsInstance(r, HttpResponseBadRequest)
r = self.c.post(reverse('post_comment'),
data={'parentType': 'InvalidType',
'parentId': 1})
self.assertIsInstance(r, HttpResponseBadRequest)
def test_no_comment_text(self):
self.c.login(**self.credentials)
test_data = {
'parentType': 'submission',
'parentId': 1,
'commentContent': ''
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertEqual(r.status_code, 200)
json_response = json.loads(r.content.decode("utf-8"))
self.assertEqual(json_response['msg'],
'You have to write something.')
def test_invalid_or_wrong_parent_id(self):
self.c.login(**self.credentials)
test_data = {
'parentType': 'submission',
'parentId': 'invalid',
'commentContent': 'content'
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertIsInstance(r, HttpResponseBadRequest)
test_data = {
'parentType': 'submission',
'parentId': 9999,
'commentContent': 'content'
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertIsInstance(r, HttpResponseBadRequest)
test_data = {
'parentType': 'comment',
'parentId': 9999,
'commentContent': 'content'
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertIsInstance(r, HttpResponseBadRequest)
def test_valid_comment_posting_thread(self):
self.c.login(**self.credentials)
test_data = {
'parentType': 'submission',
'parentId': 99,
'commentContent': 'thread root comment'
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertEqual(r.status_code, 200)
json_r = json.loads(r.content.decode("utf-8"))
self.assertEqual(json_r['msg'], 'Your comment has been posted.')
all_comments = Comment.objects.filter(
submission=Submission.objects.get(id=99)
)
self.assertEqual(all_comments.count(), 1)
comment = all_comments.first()
self.assertEqual(comment.html_comment, '<p>thread root comment</p>\n')
self.assertEqual(comment.author.user.username, self.credentials['username'])
def test_valid_comment_posting_reply(self):
self.c.login(**self.credentials)
thread = Submission.objects.get(id=99)
author = RedditUser.objects.get(user=User.objects.get(
username=self.credentials['username']
))
comment = Comment.create(author, 'root comment', thread)
comment.save()
self.assertEqual(Comment.objects.filter(submission=thread).count(), 1)
test_data = {
'parentType': 'comment',
'parentId': comment.id,
'commentContent': 'thread reply comment',
'comment_count': 5
}
r = self.c.post(reverse('post_comment'), data=test_data)
self.assertEqual(r.status_code, 200)
json_r = json.loads(r.content.decode("utf-8"))
self.assertEqual(json_r['msg'], 'Your comment has been posted.')
self.assertEqual(Comment.objects.filter(submission=thread).count(), 2)
comment = Comment.objects.filter(submission=thread,
id=2).first()
self.assertEqual(comment.html_comment, '<p>thread reply comment</p>\n')
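# --- Running these tests (sketch; assumes the standard Django test runner and
# that this module lives inside the `reddit` app) ---
#   python manage.py test reddit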
|
the-stack_106_13933
|
from collections import defaultdict
import json
import logging
import sys
import time
import ray
from ray import gcs_utils
from google.protobuf.json_format import MessageToDict
from ray._private import services
from ray.utils import (decode, binary_to_hex, hex_to_binary)
from ray._raylet import GlobalStateAccessor
logger = logging.getLogger(__name__)
class GlobalState:
"""A class used to interface with the Ray control state.
# TODO(zongheng): In the future move this to use Ray's redis module in the
# backend to cut down on # of request RPCs.
Attributes:
redis_client: The Redis client used to query the primary redis server.
redis_clients: Redis clients for each of the Redis shards.
global_state_accessor: The client used to query gcs table from gcs
server.
"""
def __init__(self):
"""Create a GlobalState object."""
# Args used for lazy init of this object.
self.redis_address = None
self.redis_password = None
        # The redis server storing metadata, such as function table, client
        # table, log files, event logs, workers/actors info.
self.redis_client = None
# Clients for the redis shards, storing the object table & task table.
self.redis_clients = None
self.global_state_accessor = None
def _check_connected(self):
"""Ensure that the object has been initialized before it is used.
This lazily initializes clients needed for state accessors.
Raises:
RuntimeError: An exception is raised if ray.init() has not been
called yet.
"""
if self.redis_client is None and self.redis_address is not None:
self._really_init_global_state()
if (self.redis_client is None or self.redis_clients is None
or self.global_state_accessor is None):
raise ray.exceptions.RaySystemError(
"Ray has not been started yet. You can start Ray with "
"'ray.init()'.")
def disconnect(self):
"""Disconnect global state from GCS."""
self.redis_client = None
self.redis_clients = None
self.redis_address = None
self.redis_password = None
if self.global_state_accessor is not None:
self.global_state_accessor.disconnect()
self.global_state_accessor = None
def _initialize_global_state(self, redis_address, redis_password=None):
"""Set args for lazily initialization of the GlobalState object.
It's possible that certain keys in Redis may not have been fully
populated yet. In this case, we will retry this method until they have
been populated or we exceed a timeout.
Args:
redis_address: The Redis address to connect.
redis_password: The password of the redis server.
"""
# Save args for lazy init of global state. This avoids opening extra
# redis connections from each worker until needed.
self.redis_address = redis_address
self.redis_password = redis_password
def _really_init_global_state(self, timeout=20):
self.redis_client = services.create_redis_client(
self.redis_address, self.redis_password)
self.global_state_accessor = GlobalStateAccessor(
self.redis_address, self.redis_password, False)
self.global_state_accessor.connect()
start_time = time.time()
num_redis_shards = None
redis_shard_addresses = []
while time.time() - start_time < timeout:
# Attempt to get the number of Redis shards.
num_redis_shards = self.redis_client.get("NumRedisShards")
if num_redis_shards is None:
print("Waiting longer for NumRedisShards to be populated.")
time.sleep(1)
continue
num_redis_shards = int(num_redis_shards)
assert num_redis_shards >= 1, (
f"Expected at least one Redis shard, found {num_redis_shards}."
)
# Attempt to get all of the Redis shards.
redis_shard_addresses = self.redis_client.lrange(
"RedisShards", start=0, end=-1)
if len(redis_shard_addresses) != num_redis_shards:
print("Waiting longer for RedisShards to be populated.")
time.sleep(1)
continue
# If we got here then we successfully got all of the information.
break
# Check to see if we timed out.
if time.time() - start_time >= timeout:
raise TimeoutError("Timed out while attempting to initialize the "
"global state. "
f"num_redis_shards = {num_redis_shards}, "
"redis_shard_addresses = "
f"{redis_shard_addresses}")
# Get the rest of the information.
self.redis_clients = []
for shard_address in redis_shard_addresses:
self.redis_clients.append(
services.create_redis_client(shard_address.decode(),
self.redis_password))
def _execute_command(self, key, *args):
"""Execute a Redis command on the appropriate Redis shard based on key.
Args:
key: The object ref or the task ID that the query is about.
args: The command to run.
Returns:
The value returned by the Redis command.
"""
client = self.redis_clients[key.redis_shard_hash() % len(
self.redis_clients)]
return client.execute_command(*args)
def _keys(self, pattern):
"""Execute the KEYS command on all Redis shards.
Args:
pattern: The KEYS pattern to query.
Returns:
The concatenated list of results from all shards.
"""
result = []
for client in self.redis_clients:
result.extend(list(client.scan_iter(match=pattern)))
return result
def object_table(self, object_ref=None):
"""Fetch and parse the object table info for one or more object refs.
Args:
object_ref: An object ref to fetch information about. If this is
None, then the entire object table is fetched.
Returns:
Information from the object table.
"""
self._check_connected()
if object_ref is not None:
object_ref = ray.ObjectRef(hex_to_binary(object_ref))
object_info = self.global_state_accessor.get_object_info(
object_ref)
if object_info is None:
return {}
else:
object_location_info = gcs_utils.ObjectLocationInfo.FromString(
object_info)
return self._gen_object_info(object_location_info)
else:
object_table = self.global_state_accessor.get_object_table()
results = {}
for i in range(len(object_table)):
object_location_info = gcs_utils.ObjectLocationInfo.FromString(
object_table[i])
results[binary_to_hex(object_location_info.object_id)] = \
self._gen_object_info(object_location_info)
return results
def _gen_object_info(self, object_location_info):
"""Parse object location info.
Returns:
Information from object.
"""
locations = []
for location in object_location_info.locations:
locations.append(ray.utils.binary_to_hex(location.manager))
object_info = {
"ObjectRef": ray.utils.binary_to_hex(
object_location_info.object_id),
"Locations": locations,
}
return object_info
def actor_table(self, actor_id):
"""Fetch and parse the actor table information for a single actor ID.
Args:
actor_id: A hex string of the actor ID to fetch information about.
If this is None, then the actor table is fetched.
Returns:
Information from the actor table.
"""
self._check_connected()
if actor_id is not None:
actor_id = ray.ActorID(hex_to_binary(actor_id))
actor_info = self.global_state_accessor.get_actor_info(actor_id)
if actor_info is None:
return {}
else:
actor_table_data = gcs_utils.ActorTableData.FromString(
actor_info)
return self._gen_actor_info(actor_table_data)
else:
actor_table = self.global_state_accessor.get_actor_table()
results = {}
for i in range(len(actor_table)):
actor_table_data = gcs_utils.ActorTableData.FromString(
actor_table[i])
results[binary_to_hex(actor_table_data.actor_id)] = \
self._gen_actor_info(actor_table_data)
return results
def _gen_actor_info(self, actor_table_data):
"""Parse actor table data.
Returns:
Information from actor table.
"""
actor_info = {
"ActorID": binary_to_hex(actor_table_data.actor_id),
"Name": actor_table_data.name,
"JobID": binary_to_hex(actor_table_data.job_id),
"Address": {
"IPAddress": actor_table_data.address.ip_address,
"Port": actor_table_data.address.port,
"NodeID": binary_to_hex(actor_table_data.address.raylet_id),
},
"OwnerAddress": {
"IPAddress": actor_table_data.owner_address.ip_address,
"Port": actor_table_data.owner_address.port,
"NodeID": binary_to_hex(
actor_table_data.owner_address.raylet_id),
},
"State": actor_table_data.state,
"NumRestarts": actor_table_data.num_restarts,
"Timestamp": actor_table_data.timestamp,
}
return actor_info
def node_resource_table(self, node_id=None):
"""Fetch and parse the node resource table info for one.
Args:
node_id: An node ID to fetch information about.
Returns:
Information from the node resource table.
"""
self._check_connected()
node_id = ray.NodeID(hex_to_binary(node_id))
node_resource_bytes = \
self.global_state_accessor.get_node_resource_info(node_id)
if node_resource_bytes is None:
return {}
else:
node_resource_info = gcs_utils.ResourceMap.FromString(
node_resource_bytes)
return {
key: value.resource_capacity
for key, value in node_resource_info.items.items()
}
def node_table(self):
"""Fetch and parse the Gcs node info table.
Returns:
Information about the node in the cluster.
"""
self._check_connected()
node_table = self.global_state_accessor.get_node_table()
results = []
for node_info_item in node_table:
item = gcs_utils.GcsNodeInfo.FromString(node_info_item)
node_info = {
"NodeID": ray.utils.binary_to_hex(item.node_id),
"Alive": item.state ==
gcs_utils.GcsNodeInfo.GcsNodeState.Value("ALIVE"),
"NodeManagerAddress": item.node_manager_address,
"NodeManagerHostname": item.node_manager_hostname,
"NodeManagerPort": item.node_manager_port,
"ObjectManagerPort": item.object_manager_port,
"ObjectStoreSocketName": item.object_store_socket_name,
"RayletSocketName": item.raylet_socket_name,
"MetricsExportPort": item.metrics_export_port,
}
node_info["alive"] = node_info["Alive"]
node_info["Resources"] = self.node_resource_table(
node_info["NodeID"]) if node_info["Alive"] else {}
results.append(node_info)
return results
def job_table(self):
"""Fetch and parse the Redis job table.
Returns:
Information about the Ray jobs in the cluster,
namely a list of dicts with keys:
- "JobID" (identifier for the job),
- "DriverIPAddress" (IP address of the driver for this job),
- "DriverPid" (process ID of the driver for this job),
- "StartTime" (UNIX timestamp of the start time of this job),
- "StopTime" (UNIX timestamp of the stop time of this job, if any)
"""
self._check_connected()
job_table = self.global_state_accessor.get_job_table()
results = []
for i in range(len(job_table)):
entry = gcs_utils.JobTableData.FromString(job_table[i])
job_info = {}
job_info["JobID"] = entry.job_id.hex()
job_info["DriverIPAddress"] = entry.driver_ip_address
job_info["DriverPid"] = entry.driver_pid
if entry.is_dead:
job_info["StopTime"] = entry.timestamp
else:
job_info["StartTime"] = entry.timestamp
results.append(job_info)
return results
def profile_table(self):
self._check_connected()
result = defaultdict(list)
profile_table = self.global_state_accessor.get_profile_table()
for i in range(len(profile_table)):
profile = gcs_utils.ProfileTableData.FromString(profile_table[i])
component_type = profile.component_type
component_id = binary_to_hex(profile.component_id)
node_ip_address = profile.node_ip_address
for event in profile.profile_events:
try:
extra_data = json.loads(event.extra_data)
except ValueError:
extra_data = {}
profile_event = {
"event_type": event.event_type,
"component_id": component_id,
"node_ip_address": node_ip_address,
"component_type": component_type,
"start_time": event.start_time,
"end_time": event.end_time,
"extra_data": extra_data
}
result[component_id].append(profile_event)
return dict(result)
def placement_group_table(self, placement_group_id=None):
self._check_connected()
if placement_group_id is not None:
placement_group_id = ray.PlacementGroupID(
hex_to_binary(placement_group_id.hex()))
placement_group_info = (
self.global_state_accessor.get_placement_group_info(
placement_group_id))
if placement_group_info is None:
return {}
else:
placement_group_info = (gcs_utils.PlacementGroupTableData.
FromString(placement_group_info))
return self._gen_placement_group_info(placement_group_info)
else:
placement_group_table = self.global_state_accessor.\
get_placement_group_table()
results = {}
for placement_group_info in placement_group_table:
placement_group_table_data = gcs_utils.\
PlacementGroupTableData.FromString(placement_group_info)
placement_group_id = binary_to_hex(
placement_group_table_data.placement_group_id)
results[placement_group_id] = \
self._gen_placement_group_info(placement_group_table_data)
return results
def _gen_placement_group_info(self, placement_group_info):
        # This should be imported here; otherwise it breaks the doc build.
from ray.core.generated.common_pb2 import PlacementStrategy
def get_state(state):
if state == ray.gcs_utils.PlacementGroupTableData.PENDING:
return "PENDING"
elif state == ray.gcs_utils.PlacementGroupTableData.CREATED:
return "CREATED"
else:
return "REMOVED"
def get_strategy(strategy):
if strategy == PlacementStrategy.PACK:
return "PACK"
elif strategy == PlacementStrategy.STRICT_PACK:
return "STRICT_PACK"
elif strategy == PlacementStrategy.STRICT_SPREAD:
return "STRICT_SPREAD"
elif strategy == PlacementStrategy.SPREAD:
return "SPREAD"
else:
raise ValueError(
f"Invalid strategy returned: {PlacementStrategy}")
assert placement_group_info is not None
return {
"placement_group_id": binary_to_hex(
placement_group_info.placement_group_id),
"name": placement_group_info.name,
"bundles": {
                # The value here needs to be converted to a dictionary,
                # otherwise the payload becomes unserializable.
bundle.bundle_id.bundle_index:
MessageToDict(bundle)["unitResources"]
for bundle in placement_group_info.bundles
},
"strategy": get_strategy(placement_group_info.strategy),
"state": get_state(placement_group_info.state),
}
def _seconds_to_microseconds(self, time_in_seconds):
"""A helper function for converting seconds to microseconds."""
time_in_microseconds = 10**6 * time_in_seconds
return time_in_microseconds
# Colors are specified at
# https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html. # noqa: E501
_default_color_mapping = defaultdict(
lambda: "generic_work", {
"worker_idle": "cq_build_abandoned",
"task": "rail_response",
"task:deserialize_arguments": "rail_load",
"task:execute": "rail_animation",
"task:store_outputs": "rail_idle",
"wait_for_function": "detailed_memory_dump",
"ray.get": "good",
"ray.put": "terrible",
"ray.wait": "vsync_highlight_color",
"submit_task": "background_memory_dump",
"fetch_and_run_function": "detailed_memory_dump",
"register_remote_function": "detailed_memory_dump",
})
# These colors are for use in Chrome tracing.
_chrome_tracing_colors = [
"thread_state_uninterruptible",
"thread_state_iowait",
"thread_state_running",
"thread_state_runnable",
"thread_state_sleeping",
"thread_state_unknown",
"background_memory_dump",
"light_memory_dump",
"detailed_memory_dump",
"vsync_highlight_color",
"generic_work",
"good",
"bad",
"terrible",
# "black",
# "grey",
# "white",
"yellow",
"olive",
"rail_response",
"rail_animation",
"rail_idle",
"rail_load",
"startup",
"heap_dump_stack_frame",
"heap_dump_object_type",
"heap_dump_child_node_arrow",
"cq_build_running",
"cq_build_passed",
"cq_build_failed",
"cq_build_abandoned",
"cq_build_attempt_runnig",
"cq_build_attempt_passed",
"cq_build_attempt_failed",
]
def chrome_tracing_dump(self, filename=None):
"""Return a list of profiling events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary.
"""
# TODO(rkn): Support including the task specification data in the
# timeline.
# TODO(rkn): This should support viewing just a window of time or a
# limited number of events.
self._check_connected()
profile_table = self.profile_table()
all_events = []
for component_id_hex, component_events in profile_table.items():
# Only consider workers and drivers.
component_type = component_events[0]["component_type"]
if component_type not in ["worker", "driver"]:
continue
for event in component_events:
new_event = {
# The category of the event.
"cat": event["event_type"],
# The string displayed on the event.
"name": event["event_type"],
# The identifier for the group of rows that the event
# appears in.
"pid": event["node_ip_address"],
# The identifier for the row that the event appears in.
"tid": event["component_type"] + ":" +
event["component_id"],
# The start time in microseconds.
"ts": self._seconds_to_microseconds(event["start_time"]),
# The duration in microseconds.
"dur": self._seconds_to_microseconds(event["end_time"] -
event["start_time"]),
                    # "ph" is the event phase; "X" marks a complete event with a duration.
"ph": "X",
# This is the name of the color to display the box in.
"cname": self._default_color_mapping[event["event_type"]],
# The extra user-defined data.
"args": event["extra_data"],
}
# Modify the json with the additional user-defined extra data.
# This can be used to add fields or override existing fields.
if "cname" in event["extra_data"]:
new_event["cname"] = event["extra_data"]["cname"]
if "name" in event["extra_data"]:
new_event["name"] = event["extra_data"]["name"]
all_events.append(new_event)
if filename is not None:
with open(filename, "w") as outfile:
json.dump(all_events, outfile)
else:
return all_events
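    # Shape of a single emitted Chrome-tracing event (values are illustrative):
    #   {"cat": "task:execute", "name": "task:execute",
    #    "pid": "172.31.10.5", "tid": "worker:abc123",
    #    "ts": 1600000000000000.0, "dur": 1500.0,
    #    "ph": "X", "cname": "rail_animation", "args": {...}}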
def chrome_tracing_object_transfer_dump(self, filename=None):
"""Return a list of transfer events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary.
"""
self._check_connected()
node_id_to_address = {}
for node_info in self.node_table():
node_id_to_address[node_info["NodeID"]] = "{}:{}".format(
node_info["NodeManagerAddress"],
node_info["ObjectManagerPort"])
all_events = []
for key, items in self.profile_table().items():
# Only consider object manager events.
if items[0]["component_type"] != "object_manager":
continue
for event in items:
if event["event_type"] == "transfer_send":
object_ref, remote_node_id, _, _ = event["extra_data"]
elif event["event_type"] == "transfer_receive":
object_ref, remote_node_id, _, _ = event["extra_data"]
elif event["event_type"] == "receive_pull_request":
object_ref, remote_node_id = event["extra_data"]
else:
assert False, "This should be unreachable."
# Choose a color by reading the first couple of hex digits of
# the object ref as an integer and turning that into a color.
object_ref_int = int(object_ref[:2], 16)
color = self._chrome_tracing_colors[object_ref_int % len(
self._chrome_tracing_colors)]
new_event = {
# The category of the event.
"cat": event["event_type"],
# The string displayed on the event.
"name": event["event_type"],
# The identifier for the group of rows that the event
# appears in.
"pid": node_id_to_address[key],
# The identifier for the row that the event appears in.
"tid": node_id_to_address[remote_node_id],
# The start time in microseconds.
"ts": self._seconds_to_microseconds(event["start_time"]),
# The duration in microseconds.
"dur": self._seconds_to_microseconds(event["end_time"] -
event["start_time"]),
                    # "ph" is the event phase; "X" marks a complete event with a duration.
"ph": "X",
# This is the name of the color to display the box in.
"cname": color,
# The extra user-defined data.
"args": event["extra_data"],
}
all_events.append(new_event)
# Add another box with a color indicating whether it was a send
# or a receive event.
if event["event_type"] == "transfer_send":
additional_event = new_event.copy()
additional_event["cname"] = "black"
all_events.append(additional_event)
elif event["event_type"] == "transfer_receive":
additional_event = new_event.copy()
additional_event["cname"] = "grey"
all_events.append(additional_event)
else:
pass
if filename is not None:
with open(filename, "w") as outfile:
json.dump(all_events, outfile)
else:
return all_events
def workers(self):
"""Get a dictionary mapping worker ID to worker information."""
self._check_connected()
# Get all data in worker table
worker_table = self.global_state_accessor.get_worker_table()
workers_data = {}
for i in range(len(worker_table)):
worker_table_data = gcs_utils.WorkerTableData.FromString(
worker_table[i])
if worker_table_data.is_alive and \
worker_table_data.worker_type == gcs_utils.WORKER:
worker_id = binary_to_hex(
worker_table_data.worker_address.worker_id)
worker_info = worker_table_data.worker_info
workers_data[worker_id] = {
"node_ip_address": decode(worker_info[b"node_ip_address"]),
"plasma_store_socket": decode(
worker_info[b"plasma_store_socket"])
}
if b"stderr_file" in worker_info:
workers_data[worker_id]["stderr_file"] = decode(
worker_info[b"stderr_file"])
if b"stdout_file" in worker_info:
workers_data[worker_id]["stdout_file"] = decode(
worker_info[b"stdout_file"])
return workers_data
def add_worker(self, worker_id, worker_type, worker_info):
""" Add a worker to the cluster.
Args:
worker_id: ID of this worker. Type is bytes.
worker_type: Type of this worker. Value is ray.gcs_utils.DRIVER or
ray.gcs_utils.WORKER.
worker_info: Info of this worker. Type is dict{str: str}.
Returns:
            Whether the operation succeeded.
"""
worker_data = ray.gcs_utils.WorkerTableData()
worker_data.is_alive = True
worker_data.worker_address.worker_id = worker_id
worker_data.worker_type = worker_type
for k, v in worker_info.items():
worker_data.worker_info[k] = bytes(v, encoding="utf-8")
return self.global_state_accessor.add_worker_info(
worker_data.SerializeToString())
def _job_length(self):
event_log_sets = self.redis_client.keys("event_log*")
overall_smallest = sys.maxsize
overall_largest = 0
num_tasks = 0
for event_log_set in event_log_sets:
fwd_range = self.redis_client.zrange(
event_log_set, start=0, end=0, withscores=True)
overall_smallest = min(overall_smallest, fwd_range[0][1])
rev_range = self.redis_client.zrevrange(
event_log_set, start=0, end=0, withscores=True)
overall_largest = max(overall_largest, rev_range[0][1])
num_tasks += self.redis_client.zcount(
event_log_set, min=0, max=time.time())
if num_tasks == 0:
return 0, 0, 0
return overall_smallest, overall_largest, num_tasks
def cluster_resources(self):
"""Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or
removed from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
self._check_connected()
resources = defaultdict(int)
clients = self.node_table()
for client in clients:
# Only count resources from latest entries of live clients.
if client["Alive"]:
for key, value in client["Resources"].items():
resources[key] += value
return dict(resources)
def _live_client_ids(self):
"""Returns a set of client IDs corresponding to clients still alive."""
return {
client["NodeID"]
for client in self.node_table() if (client["Alive"])
}
def _available_resources_per_node(self):
"""Returns a dictionary mapping node id to avaiable resources."""
available_resources_by_id = {}
all_available_resources = \
self.global_state_accessor.get_all_available_resources()
for available_resource in all_available_resources:
message = ray.gcs_utils.AvailableResources.FromString(
available_resource)
# Calculate available resources for this node.
dynamic_resources = {}
for resource_id, capacity in \
message.resources_available.items():
dynamic_resources[resource_id] = capacity
# Update available resources for this node.
node_id = ray.utils.binary_to_hex(message.node_id)
available_resources_by_id[node_id] = dynamic_resources
# Update nodes in cluster.
node_ids = self._live_client_ids()
        # Remove disconnected nodes. Iterate over a copy of the keys since
        # entries are deleted during iteration.
        for node_id in list(available_resources_by_id.keys()):
if node_id not in node_ids:
del available_resources_by_id[node_id]
return available_resources_by_id
def available_resources(self):
"""Get the current available cluster resources.
This is different from `cluster_resources` in that this will return
idle (available) resources rather than total resources.
Note that this information can grow stale as tasks start and finish.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
self._check_connected()
available_resources_by_id = self._available_resources_per_node()
# Calculate total available resources.
total_available_resources = defaultdict(int)
for available_resources in available_resources_by_id.values():
for resource_id, num_available in available_resources.items():
total_available_resources[resource_id] += num_available
return dict(total_available_resources)
def actor_checkpoint_info(self, actor_id):
"""Get checkpoint info for the given actor id.
Args:
actor_id: Actor's ID.
Returns:
A dictionary with information about the actor's checkpoint IDs and
their timestamps.
"""
self._check_connected()
message = self._execute_command(
actor_id,
"RAY.TABLE_LOOKUP",
gcs_utils.TablePrefix.Value("ACTOR_CHECKPOINT_ID"),
"",
actor_id.binary(),
)
if message is None:
return None
gcs_entry = gcs_utils.GcsEntry.FromString(message)
entry = gcs_utils.ActorCheckpointIdData.FromString(
gcs_entry.entries[0])
checkpoint_ids = [
ray.ActorCheckpointID(checkpoint_id)
for checkpoint_id in entry.checkpoint_ids
]
return {
"ActorID": ray.utils.binary_to_hex(entry.actor_id),
"CheckpointIds": checkpoint_ids,
"Timestamps": list(entry.timestamps),
}
state = GlobalState()
"""A global object used to access the cluster's global state."""
def jobs():
"""Get a list of the jobs in the cluster (for debugging only).
Returns:
Information from the job table, namely a list of dicts with keys:
- "JobID" (identifier for the job),
- "DriverIPAddress" (IP address of the driver for this job),
- "DriverPid" (process ID of the driver for this job),
- "StartTime" (UNIX timestamp of the start time of this job),
- "StopTime" (UNIX timestamp of the stop time of this job, if any)
"""
return state.job_table()
def nodes():
"""Get a list of the nodes in the cluster (for debugging only).
Returns:
Information about the Ray clients in the cluster.
"""
return state.node_table()
def workers():
"""Get a list of the workers in the cluster.
Returns:
Information about the Ray workers in the cluster.
"""
return state.workers()
def current_node_id():
"""Return the node id of the current node.
For example, "node:172.10.5.34". This can be used as a custom resource,
e.g., {node_id: 1} to reserve the whole node, or {node_id: 0.001} to
just force placement on the node.
Returns:
Id of the current node.
"""
return (ray.resource_spec.NODE_ID_PREFIX +
ray._private.services.get_node_ip_address())
def node_ids():
"""Get a list of the node ids in the cluster.
For example, ["node:172.10.5.34", "node:172.42.3.77"]. These can be used
as custom resources, e.g., {node_id: 1} to reserve the whole node, or
{node_id: 0.001} to just force placement on the node.
Returns:
List of the node resource ids.
"""
node_ids = []
for node in nodes():
for k, v in node["Resources"].items():
if k.startswith(ray.resource_spec.NODE_ID_PREFIX):
node_ids.append(k)
return node_ids
def actors(actor_id=None):
"""Fetch actor info for one or more actor IDs (for debugging only).
Args:
actor_id: A hex string of the actor ID to fetch information about. If
this is None, then all actor information is fetched.
Returns:
Information about the actors.
"""
return state.actor_table(actor_id=actor_id)
def objects(object_ref=None):
"""Fetch and parse the object table info for one or more object refs.
Args:
object_ref: An object ref to fetch information about. If this is None,
then the entire object table is fetched.
Returns:
Information from the object table.
"""
return state.object_table(object_ref=object_ref)
def timeline(filename=None):
"""Return a list of profiling events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file by
passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling events.
Each profile event is a dictionary.
"""
return state.chrome_tracing_dump(filename=filename)
def object_transfer_timeline(filename=None):
"""Return a list of transfer events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file by
passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file. Make
sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling events.
Each profile event is a dictionary.
"""
return state.chrome_tracing_object_transfer_dump(filename=filename)
def cluster_resources():
"""Get the current total cluster resources.
Note that this information can grow stale as nodes are added to or removed
from the cluster.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
return state.cluster_resources()
def available_resources():
"""Get the current available cluster resources.
This is different from `cluster_resources` in that this will return idle
(available) resources rather than total resources.
Note that this information can grow stale as tasks start and finish.
Returns:
A dictionary mapping resource name to the total quantity of that
resource in the cluster.
"""
return state.available_resources()
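# --- Usage sketch (assumes a running local Ray instance; mirrors the public
# wrappers defined above, for debugging only) ---
if __name__ == "__main__":
    import ray
    ray.init()
    print(ray.cluster_resources())       # e.g. {"CPU": 8.0, "memory": ...}
    print(ray.available_resources())     # idle subset of the above
    ray.timeline(filename="/tmp/ray_timeline.json")  # open in chrome://tracing
    ray.shutdown()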
|
the-stack_106_13934
|
from enforce_typing import enforce_types
import pytest
from typing import Union
from engine import AgentWallet, AgentBase
from web3tools import web3util, web3wallet
from web3tools.web3util import toBase18
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
#alice:
# 1. starts with an init OCEAN
# 2. creates a DT, and mints an init amount
# 3. creates a DT-OCEAN pool, and adds init liquidity
_OCEAN_INIT = 1000.0
_OCEAN_STAKE = 200.0
_DT_INIT = 100.0
_DT_STAKE = 20.0
_POOL_WEIGHT_DT = 3.0
_POOL_WEIGHT_OCEAN = 7.0
@pytest.fixture
def alice_info():
#only use this when there are >1 args into a test function and
# we need addresses to line up. Otherwise, use a more specific function.
return _alice_info()
@pytest.fixture
def alice_private_key() -> str:
return _alice_info().private_key
@pytest.fixture
def alice_agent():
class MockAgent(AgentBase.AgentBase):
def takeStep(self, state):
pass
agent = MockAgent("agent1",USD=0.0,OCEAN=0.0)
agent._wallet = _alice_info().agent_wallet
return agent
@pytest.fixture
def alice_agent_wallet() -> AgentWallet.AgentWallet:
return _alice_info().agent_wallet
@pytest.fixture
def alice_web3wallet() -> web3wallet.Web3Wallet:
return _alice_info().wallet
@pytest.fixture
def alice_DT() -> datatoken.Datatoken:
return _alice_info().DT
@pytest.fixture
def alice_pool():
return _alice_info().pool
@enforce_types
def _alice_info():
return _make_info(private_key_name='TEST_PRIVATE_KEY1')
@enforce_types
def _make_info(private_key_name:str):
class _Info:
def __init__(self):
self.private_key: Union[str, None] = None
self.agent_wallet: Union[AgentWallet.AgentWallet, None] = None
            self.web3wallet: Union[web3wallet.Web3Wallet, None] = None
            self.DT: Union[datatoken.Datatoken, None] = None
            self.pool: Union[bpool.BPool, None] = None
info = _Info()
network = web3util.get_network()
info.private_key = web3util.confFileValue(network, private_key_name)
info.agent_wallet = AgentWallet.AgentWallet(
OCEAN=_OCEAN_INIT,private_key=info.private_key)
info.web3wallet = info.agent_wallet._web3wallet
info.DT = _createDT(info.web3wallet)
info.pool = _createPool(DT=info.DT, web3_w=info.web3wallet)
return info
@enforce_types
def _createDT(web3_w:web3wallet.Web3Wallet)-> datatoken.Datatoken:
DT_address = dtfactory.DTFactory().createToken(
'foo', 'DT1', 'DT1', toBase18(_DT_INIT),from_wallet=web3_w)
DT = datatoken.Datatoken(DT_address)
DT.mint(web3_w.address, toBase18(_DT_INIT), from_wallet=web3_w)
return DT
@enforce_types
def _createPool(DT:datatoken.Datatoken, web3_w:web3wallet.Web3Wallet):
OCEAN = globaltokens.OCEANtoken()
#Create OCEAN-DT pool
p_address = bfactory.BFactory().newBPool(from_wallet=web3_w)
pool = bpool.BPool(p_address)
DT.approve(pool.address, toBase18(_DT_STAKE), from_wallet=web3_w)
OCEAN.approve(pool.address, toBase18(_OCEAN_STAKE),from_wallet=web3_w)
pool.bind(DT.address, toBase18(_DT_STAKE),
toBase18(_POOL_WEIGHT_DT), from_wallet=web3_w)
pool.bind(OCEAN.address, toBase18(_OCEAN_STAKE),
toBase18(_POOL_WEIGHT_OCEAN), from_wallet=web3_w)
pool.finalize(from_wallet=web3_w)
return pool
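# --- Illustrative test sketch (hypothetical; assumes AgentWallet exposes an
# OCEAN() balance getter, which is not shown in this file) ---
# def test_alice_starts_with_init_OCEAN(alice_agent_wallet):
#     assert alice_agent_wallet.OCEAN() == _OCEAN_INIT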
|
the-stack_106_13938
|
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
os.environ['PYWIKIBOT_DIR'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'protect-config')
import pymysql
import pywikibot
from config import (database, # pylint: disable=E0611,W0614
protect_config_page_name)
parser = argparse.ArgumentParser()
parser.add_argument('lang', nargs='?', default='zh')
parser.add_argument('wiki', nargs='?', default='wikipedia')
parser.add_argument('dbwiki', nargs='?', default='zhwiki')
args = parser.parse_args()
os.environ['TZ'] = 'UTC'
site = pywikibot.Site(args.lang, args.wiki)
site.login()
config_page = pywikibot.Page(site, protect_config_page_name[args.lang][args.wiki])
cfg = config_page.text
cfg = json.loads(cfg)
print(json.dumps(cfg, indent=4, ensure_ascii=False))
if not cfg['enable']:
exit('disabled\n')
db = pymysql.connect(host=database['host'],
user=database['user'],
passwd=database['passwd'],
db=database['db'],
charset=database['charset'])
cur = db.cursor()
cur.execute("""SELECT `title`, `count`, `protectedit`, `protectmove`, `redirect` FROM `MostTranscludedPages_page` WHERE `wiki` = %s AND `redirect` != 2 ORDER BY `title` ASC""", (args.dbwiki))
rows = cur.fetchall()
def check_required_protection(title, count):
if title.startswith('MediaWiki:'):
return 0
if title.startswith('User:'):
if title.endswith('.js') or title.endswith('.css') or title.endswith('.json'):
return 0
if count >= cfg['template_full'] and cfg['template_full'] > 0:
return 4
if count >= cfg['template_temp'] and cfg['template_temp'] > 0:
return 3
if count >= cfg['template_semi'] and cfg['template_semi'] > 0:
return 1
return 0
protection2number = {
'sysop': 4,
'templateeditor': 3,
'extendedconfirmed': 2,
'autoconfirmed': 1,
'': 0,
}
number2protection = {
4: 'sysop',
3: 'templateeditor',
2: 'extendedconfirmed',
1: 'autoconfirmed',
0: '',
}
for row in rows:
title = row[0]
count = row[1]
protectedit = row[2]
protectmove = row[3]
redirect = row[4]
required_protection = check_required_protection(title, count)
current_protection = protection2number[protectedit]
if required_protection > current_protection:
page = pywikibot.Page(site, title)
if not page.exists():
            print('{} does not exist'.format(title))
continue
if 'exclude_regex' in cfg and cfg['exclude_regex'] != '' and re.search(cfg['exclude_regex'], title):
print('Ignore {}'.format(title))
continue
        protect_args = {
'reason': cfg['summary'].format(count),
'prompt': False,
'protections': {
'edit': number2protection[required_protection],
'move': number2protection[required_protection],
},
}
        print(title, protect_args)
        page.protect(**protect_args)
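# --- Example of the on-wiki JSON config this script reads (illustrative values;
# only keys referenced above are shown) ---
# {
#     "enable": true,
#     "summary": "Highly transcluded template: used on {} pages",
#     "template_semi": 500,
#     "template_temp": 3000,
#     "template_full": 10000,
#     "exclude_regex": ""
# }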
|
the-stack_106_13940
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
swagger_template = {
"swagger": "",
"openapi": "3.0.0",
"components": {
"securitySchemes": {
"BearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT",
}
}
},
"definitions": {
"User": {
"type": "object",
"properties": {
"username": {"type": "string"},
"first_name": {"type": "string"},
"last_name": {"type": "string"},
"dob": {"type": "string", "format": "date"},
"phone_number": {"type": "string"},
"country": {"type": "string"},
"state": {"type": "string"},
"city": {"type": "string"},
"address_1": {"type": "string"},
"address_2": {"type": "string"},
"zip": {"type": "string"},
},
"example": {
"username": "sunmilee",
"first_name": "Sunmi",
"last_name": "Lee",
"dob": "2020-05-07",
"address": "1 Hacker Way",
},
},
"DiemCurrencies": {
"type": "string",
"enum": ["Coin1"],
},
"TransactionDirections": {
"type": "string",
"enum": ["received", "sent"],
},
"Transaction": {
"type": "object",
"properties": {
"id": {"type": "string"},
"amount": {"type": "integer"},
"currency": {"$ref": "#/definitions/DiemCurrencies"},
"direction": {"$ref": "#/definitions/TransactionDirections"},
"timestamp": {"type": "string", "format": "date-time"},
"source": {"$ref": "#/definitions/VaspAccountDetails"},
"destination": {"$ref": "#/definitions/VaspAccountDetails"},
"blockchain_tx": {"$ref": "#/definitions/BlockchainTransaction"},
},
},
"VaspAccountDetails": {
"type": "object",
"properties": {
"vasp_name": {"type": "string"},
"user_id": {"type": "string"},
},
},
"BlockchainTransaction": {
"type": "object",
"properties": {
"version": {"type": "integer"},
"status": {"type": "string"},
"expirationTime": {"type": "string"},
"source": {"type": "string"},
"destination": {"type": "string"},
"amount": {"type": "integer"},
"sequenceNumber": {"type": "integer"},
},
},
},
"security": [{"BearerAuth": []}],
}
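# --- Wiring sketch (assumes the API docs are served with flasgger; the import
# and `app` object are illustrative, not defined in this module) ---
# from flasgger import Swagger
# swagger = Swagger(app, template=swagger_template)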
|
the-stack_106_13942
|
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
>python tools/matcher.py data/schemas/npgcore_latest.ttl data/schemas/foaf.rdf
Loaded 630 triples
started scanning...
----------
Ontologies found: 1
Classes found...: 15
Properties found: 67
Annotation......: 7
Datatype........: 26
Object..........: 34
Loaded 3478 triples
started scanning...
----------
Ontologies found: 1
Classes found...: 64
Properties found: 253
Annotation......: 36
Datatype........: 133
Object..........: 84
----------
Matching...
Person ==~== Term: npg:Person
<Class *http://www.w3.org/2000/10/swap/pim/contact#Person*>
...<Class *http://ns.nature.com/terms/Person*>
Document ==~== Term: npg:Document
<Class *http://xmlns.com/foaf/0.1/Document*>
...<Class *http://ns.nature.com/terms/Document*>
Document ==~== Term: npg:DocumentAsset
<Class *http://xmlns.com/foaf/0.1/Document*>
...<Class *http://ns.nature.com/terms/DocumentAsset*>
Organization ==~== Term: npg:Organization
<Class *http://xmlns.com/foaf/0.1/Organization*>
...<Class *http://ns.nature.com/terms/Organization*>
Person ==~== Term: npg:Person
<Class *http://xmlns.com/foaf/0.1/Person*>
...<Class *http://ns.nature.com/terms/Person*>
PersonalProfileDocument ==~== Term: npg:Document
<Class *http://xmlns.com/foaf/0.1/PersonalProfileDocument*>
...<Class *http://ns.nature.com/terms/Document*>
"""
import csv
import optparse
import os
import sys
import time
from difflib import SequenceMatcher
import rdflib
from .. import main
from ..core.utils import *
USAGE = "ontospy-match foaf.rdf bibo.owl -o output.csv"
MATCHER_VERSION = 0.2
from ontospy import ontospy  # used below for ontospy.VERSION, ontospy.Ontospy, etc.
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
def matcher(
graph1,
graph2,
confidence=0.5,
output_file="matching_results.csv",
class_or_prop="classes",
verbose=False,
):
"""
takes two graphs and matches its classes based on qname, label etc..
@todo extend to properties and skos etc..
"""
printDebug("----------\nNow matching...")
f = open(output_file, "wt")
counter = 0
try:
writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(("name 1", "name 2", "uri 1", "uri 2"))
# a) match classes
if class_or_prop == "classes":
for x in graph1.all_classes:
l1 = str(x.bestLabel(qname_allowed=True))
for y in graph2.all_classes:
l2 = str(y.bestLabel(qname_allowed=True))
if similar(l1, l2) > confidence:
counter += 1
row = [l1, l2, x.uri, y.uri]
writer.writerow(
[s.encode("utf8") if type(s) is str else s for s in row]
)
if verbose:
print(("%s ==~== %s" % (l1, l2)))
# b) match properties
elif class_or_prop == "properties":
for x in graph1.all_properties:
l1 = str(x.bestLabel(qname_allowed=True))
for y in graph2.all_properties:
l2 = str(y.bestLabel(qname_allowed=True))
if similar(l1, l2) > confidence:
counter += 1
row = [l1, l2, x.uri, y.uri]
writer.writerow(
[s.encode("utf8") if type(s) is str else s for s in row]
)
if verbose:
print(("%s ==~== %s" % (l1, l2)))
finally:
f.close()
printDebug("%d candidates found." % counter)
def parse_options():
"""
parse_options() -> opts, args
Parse any command-line options given returning both
the parsed options and arguments.
https://docs.python.org/2/library/optparse.html
"""
parser = optparse.OptionParser(usage=USAGE, version=ontospy.VERSION)
parser.add_option(
"-o",
"--outputfile",
action="store",
type="string",
default="",
dest="outputfile",
help="The name of the output csv file.",
)
parser.add_option(
"-v",
"--verbose",
action="store_true",
default=False,
dest="verbose",
help="Verbose mode: prints results on screen too.",
)
opts, args = parser.parse_args()
return opts, args
import click
@click.command()
@click.option('-o', '--outputfile', default="", help = "The name of the output csv file.")
@click.option('-v', '--verbose', default=False,
help="Verbose mode: prints results on screen too.")
def _main(outputfile, verbose): pass
def main():
"""command line script"""
print(("Ontospy " + ontospy.VERSION))
ontospy.get_or_create_home_repo()
opts, args = parse_options()
if len(args) < 2:
printDebug("Please provide two arguments, or use -h for more options.")
sys.exit(0)
var = eval(input("Match classes or properties? [c|p, c=default]:"))
if var == "c":
class_or_prop = "classes"
elif var == "p":
class_or_prop = "properties"
else:
class_or_prop = "classes"
print(class_or_prop)
var = eval(input("Degree of confidence? [1-10, 5=default]: "))
try:
confidence = int(var)
if not (confidence <= 10 and confidence >= 1):
confidence = 5
except:
confidence = 5
print(confidence)
confidence = confidence / (10 * 1.0) # transform in decimal
sTime = time.time()
# automatically name the file unless a name is provided with -o option
if not opts.outputfile:
try:
opts.outputfile = "%s_%s_matching_%s.csv" % (
os.path.splitext(args[0])[0].split("/")[-1],
os.path.splitext(args[1])[0].split("/")[-1],
class_or_prop,
)
except:
opts.outputfile = f"ontospy_matching_{class_or_prop}.csv"
g1 = ontospy.Ontospy(args[0])
g2 = ontospy.Ontospy(args[1])
matcher(g1, g2, confidence, opts.outputfile, class_or_prop, opts.verbose)
# finally:
# print(some stats....)
eTime = time.time()
tTime = eTime - sTime
printDebug("-" * 10)
printDebug("Time: %0.2fs" % tTime)
if __name__ == "__main__":
import sys
try:
main()
sys.exit(0)
except KeyboardInterrupt as e: # Ctrl-C
raise e
|
the-stack_106_13946
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
log test
"""
import os
import sys
import time
import re
import shutil
import logging
def test_log_stdout():
# Clean up environment variables
_rm_env_config()
# print the logs without raising an exception.
from mindspore import log as logger
log_str = 'print informations'
logger.error("1 test log message info :%s", log_str)
logger.info("2 test log message info")
logger.warning("3 test log message warning")
logger.debug("4 test log message debug:%s", log_str)
    # Clean up _global_logger to avoid affecting the next test case
logger._global_logger = None
def test_log_default():
_rm_env_config()
from mindspore import log as logger
configdict = logger.get_log_config()
targetdict = {'GLOG_v': '2', 'GLOG_logtostderr': '1'}
assert configdict == targetdict
    # Clean up _global_logger to avoid affecting the next test case
logger._global_logger = None
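# GLOG_v levels exercised below (commonly documented MindSpore mapping; stated
# here as an assumption): 0 = DEBUG, 1 = INFO, 2 = WARNING (default), 3 = ERROR.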
def test_log_setlevel():
_rm_env_config()
os.environ['GLOG_v'] = '0'
from mindspore import log as logger
#logger_instance = logger._get_logger()
#del logger_instance
loglevel = logger.get_level()
log_str = 'print debug informations'
logger.debug("5 test log message debug:%s", log_str)
assert loglevel == '0'
    # Clean up _global_logger to avoid affecting the next test case
logger._global_logger = None
def test_log_file():
"""
    test the log content written to the log file
"""
_rm_env_config()
file_path = '/tmp/log/mindspore_test'
os.environ['GLOG_v'] = '0'
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = file_path
from mindspore import log as logger
filename = f'{file_path}/mindspore.log'
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path, exist_ok=True)
# Clear test file
if os.path.exists(filename):
os.remove(filename)
logger.warning("test log message warning")
cmd = f'cat {filename}'
result = os.popen(cmd).read()
# pylint: disable=anomalous-backslash-in-string
pattern = "\[WARNING\] ME\(.*[0-9]:.*[0-9]\,.*[a-zA-Z0-9]\):.* " \
"\[.*:.*[0-9]\] test log message warning"
match_obj = re.match(pattern, result)
#Clear test file
if os.path.exists(file_path):
shutil.rmtree(file_path)
assert match_obj
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_backup_count():
"""
test backup count
"""
#logger.reset_log_config(level=logging.INFO, console=False,
# filepath=file_path, maxBytes=1000, backupCount=10)
_rm_env_config()
file_path = '/tmp/log/mindspore_test'
os.environ['GLOG_v'] = '1'
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = file_path
os.environ['logger_maxBytes'] = '1000'
os.environ['logger_backupCount'] = '10'
from mindspore import log as logger
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path, exist_ok=True)
log_count = 100
for i in range(0, log_count, 1):
logger.warning("test log message warning %r", i)
cmd = f'cd {file_path};ls |wc -l'
backup_count = '11'
file_count = os.popen(cmd).read().strip()
if os.path.exists(file_path):
shutil.rmtree(file_path)
assert file_count == backup_count
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_verify_envconfig():
"""
test reset config
"""
dictlist = []
from mindspore import log as logger
file_path = '/tmp'
# level is not a number
_rm_env_config()
os.environ['GLOG_v'] = 'test'
verify_dict_0 = logger._get_env_config()
# level is not in range
_rm_env_config()
os.environ['GLOG_v'] = '100'
verify_dict_1 = logger._get_env_config()
# console is not a number
_rm_env_config()
os.environ['GLOG_logtostderr'] = 'test'
verify_dict_2 = logger._get_env_config()
# console is not in range
_rm_env_config()
os.environ['GLOG_logtostderr'] = '6'
verify_dict_3 = logger._get_env_config()
# path does not exist
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/test'
verify_dict_4 = logger._get_env_config()
# path is not configured
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
verify_dict_5 = logger._get_env_config()
# logger_maxBytes is not a number
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/tmp'
os.environ['logger_maxBytes'] = 'test'
os.environ['logger_backupCount'] = '10'
verify_dict_6 = logger._get_env_config()
# logger_maxBytes is a negative number
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/tmp'
os.environ['logger_maxBytes'] = '-1'
os.environ['logger_backupCount'] = '10'
verify_dict_7 = logger._get_env_config()
# logger_backupCount is not a number
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/tmp'
os.environ['logger_maxBytes'] = '0'
os.environ['logger_backupCount'] = 'test'
verify_dict_8 = logger._get_env_config()
# logger_backupCount is a negative number
_rm_env_config()
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/tmp'
os.environ['logger_maxBytes'] = '0'
os.environ['logger_backupCount'] = '-1'
verify_dict_9 = logger._get_env_config()
for i in range(0, 10, 1):
variable_name = f'verify_dict_{i}'
dictlist.append(locals()[variable_name])
for verify_dict in dictlist:
try:
logger._verify_config(verify_dict)
except ValueError as ve:
print(ve)
assert True
except TypeError as te:
print(te)
assert True
else:
assert False
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_repeated_print():
"""
test Log repeated printing
    # Printing the log message exactly once is correct; duplicates indicate an error
"""
_rm_env_config()
from mindspore import log as logger
py_logging = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
py_logging.addHandler(handler)
logger.info("test log message info test ")
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_getconfig():
_rm_env_config()
os.environ['GLOG_v'] = '3'
os.environ['GLOG_logtostderr'] = '0'
os.environ['GLOG_log_dir'] = '/tmp/log/'
os.environ['logger_maxBytes'] = '1000'
os.environ['logger_backupCount'] = '10'
from mindspore import log as logger
logger.info("test log message info test ")
configdict = logger.get_log_config()
targetdict = {'GLOG_v': '3', 'GLOG_log_dir': '/tmp/log',
'GLOG_logtostderr': '0', 'logger_maxBytes': 1000, 'logger_backupCount': 10}
assert configdict == targetdict
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_perf():
"""
Performance test with python logging
"""
_rm_env_config()
os.environ['GLOG_v'] = '3'
from mindspore import log as logger
loglevel = logging.ERROR
logging.basicConfig()
py_logging = logging.getLogger()
py_logging.setLevel(loglevel)
log_count = 100000
print("logger level:", logger.get_level())
print("py_logging level:", py_logging.getEffectiveLevel())
# Calculate PY logging execution time
start_time_py_logging = int(round(time.time() * 1000))
for i in range(0, log_count, 1):
py_logging.info("test log message info :%r", i)
end_time_py_logging = int(round(time.time() * 1000))
time_diff_py_logging = end_time_py_logging - start_time_py_logging
# Calculate MS logger execution time
start_time_logger = int(round(time.time() * 1000))
for i in range(0, log_count, 1):
logger.info("test log message info :%r", i)
end_time_logger = int(round(time.time() * 1000))
time_diff_logger = end_time_logger - start_time_logger
# Calculate time difference
time_diff = time_diff_logger - time_diff_py_logging
strprint = f'time difference between MS logger ' \
f'and Python logging: {time_diff} ms'
print(strprint)
std_time = 2000
assert time_diff < std_time
# Clean up _global_logger to avoid affecting for next usecase
logger._global_logger = None
def test_log_ms_import():
_rm_env_config()
import mindspore as ms
configdict = ms.get_log_config()
targetdict = {'GLOG_v': '2', 'GLOG_logtostderr': '1'}
level = ms.get_level()
assert configdict == targetdict and level == '2'
def _rm_env_config():
envlist = ['GLOG_v', 'GLOG_logtostderr', 'GLOG_log_dir', 'logger_maxBytes', 'logger_backupCount']
for env in envlist:
if os.environ.get(env):
del os.environ[env]
|
the-stack_106_13948
|
from seleniumbase import BaseCase
import cv2
import time
class ComponentsTest(BaseCase):
def test_basic(self):
# open the app and take a screenshot
self.open("http://localhost:8501")
time.sleep(10) # give leaflet time to load from web
self.save_screenshot("current-screenshot.png")
# automated visual regression testing
# tests page has identical structure to baseline
# https://github.com/seleniumbase/SeleniumBase/tree/master/examples/visual_testing
# level 2 chosen, as id values dynamically generated on each page run
self.check_window(name="first_test", level=2)
# check folium app-specific parts
# automated test level=2 only checks structure, not content
self.assert_text("streamlit-folium")
# test screenshots look exactly the same
original = cv2.imread(
"visual_baseline/test_package.test_basic/first_test/screenshot.png"
)
duplicate = cv2.imread("current-screenshot.png")
assert original.shape == duplicate.shape
difference = cv2.subtract(original, duplicate)
b, g, r = cv2.split(difference)
assert cv2.countNonZero(b) == cv2.countNonZero(g) == cv2.countNonZero(r) == 0
|
the-stack_106_13949
|
import re
import pytest
from pandas._libs.tslibs import Timedelta, offsets, to_offset
@pytest.mark.parametrize(
"freq_input,expected",
[
(to_offset("10us"), offsets.Micro(10)),
(offsets.Hour(), offsets.Hour()),
((5, "T"), offsets.Minute(5)),
("2h30min", offsets.Minute(150)),
("2h 30min", offsets.Minute(150)),
("2h30min15s", offsets.Second(150 * 60 + 15)),
("2h 60min", offsets.Hour(3)),
("2h 20.5min", offsets.Second(8430)),
("1.5min", offsets.Second(90)),
("0.5S", offsets.Milli(500)),
("15l500u", offsets.Micro(15500)),
("10s75L", offsets.Milli(10075)),
("1s0.25ms", offsets.Micro(1000250)),
("1s0.25L", offsets.Micro(1000250)),
("2800N", offsets.Nano(2800)),
("2SM", offsets.SemiMonthEnd(2)),
("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
("2SMS-15", offsets.SemiMonthBegin(2)),
],
)
def test_to_offset(freq_input, expected):
result = to_offset(freq_input)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)]
)
def test_to_offset_negative(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"freqstr",
[
"2h20m",
"U1",
"-U",
"3U1",
"-2-3U",
"-2D:3H",
"1.5.0S",
"2SMS-15-15",
"2SMS-15D",
"100foo",
# Invalid leading +/- signs.
"+-1d",
"-+1h",
"+1",
"-7",
"+d",
"-m",
# Invalid shortcut anchors.
"SM-0",
"SM-28",
"SM-29",
"SM-FOO",
"BSM",
"SM--1",
"SMS-1",
"SMS-28",
"SMS-30",
"SMS-BAR",
"SMS-BYR",
"BSMS",
"SMS--2",
],
)
def test_to_offset_invalid(freqstr):
# see gh-13930
# We escape string because some of our
# inputs contain regex special characters.
msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
to_offset(freqstr)
def test_to_offset_no_evaluate():
with pytest.raises(ValueError, match="Could not evaluate"):
to_offset(("", ""))
@pytest.mark.parametrize(
"freqstr,expected",
[
("2D 3H", offsets.Hour(51)),
("2 D3 H", offsets.Hour(51)),
("2 D 3 H", offsets.Hour(51)),
(" 2 D 3 H ", offsets.Hour(51)),
(" H ", offsets.Hour()),
(" 3 H ", offsets.Hour(3)),
],
)
def test_to_offset_whitespace(freqstr, expected):
result = to_offset(freqstr)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"kwargs,expected",
[
(dict(days=1, seconds=1), offsets.Second(86401)),
(dict(days=-1, seconds=1), offsets.Second(-86399)),
(dict(hours=1, minutes=10), offsets.Minute(70)),
(dict(hours=1, minutes=-10), offsets.Minute(50)),
(dict(weeks=1), offsets.Day(7)),
(dict(hours=1), offsets.Hour(1)),
(dict(hours=1), to_offset("60min")),
(dict(microseconds=1), offsets.Micro(1)),
(dict(microseconds=0), offsets.Nano(0)),
],
)
def test_to_offset_pd_timedelta(kwargs, expected):
# see gh-9064
td = Timedelta(**kwargs)
result = to_offset(td)
assert result == expected
@pytest.mark.parametrize(
"shortcut,expected",
[
("W", offsets.Week(weekday=6)),
("W-SUN", offsets.Week(weekday=6)),
("Q", offsets.QuarterEnd(startingMonth=12)),
("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
("SM", offsets.SemiMonthEnd(day_of_month=15)),
("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
],
)
def test_anchored_shortcuts(shortcut, expected):
result = to_offset(shortcut)
assert result == expected
|
the-stack_106_13952
|
import typing # noqa: F401
from kubernetes import client # noqa: F401
from kuber import kube_api as _kube_api # noqa: F401
from kuber import definitions as _kuber_definitions # noqa: F401
from kuber import _types # noqa: F401
from kuber.v1_20.meta_v1 import ListMeta # noqa: F401
from kuber.v1_20.meta_v1 import ObjectMeta # noqa: F401
from kuber.v1_20.core_v1 import Toleration # noqa: F401
class Overhead(_kuber_definitions.Definition):
"""
Overhead structure represents the resource overhead
associated with running a pod.
"""
def __init__(
self,
pod_fixed: dict = None,
):
"""Create Overhead instance."""
super(Overhead, self).__init__(api_version="node/v1", kind="Overhead")
self._properties = {
"podFixed": pod_fixed if pod_fixed is not None else {},
}
self._types = {
"podFixed": (dict, None),
}
@property
def pod_fixed(self) -> dict:
"""
PodFixed represents the fixed resource overhead associated
with running a pod.
"""
return typing.cast(
dict,
self._properties.get("podFixed"),
)
@pod_fixed.setter
def pod_fixed(self, value: dict):
"""
PodFixed represents the fixed resource overhead associated
with running a pod.
"""
self._properties["podFixed"] = value
def __enter__(self) -> "Overhead":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RuntimeClass(_kuber_definitions.Resource):
"""
RuntimeClass defines a class of container runtime supported
in the cluster. The RuntimeClass is used to determine which
container runtime is used to run all containers in a pod.
RuntimeClasses are manually defined by a user or cluster
provisioner, and referenced in the PodSpec. The Kubelet is
responsible for resolving the RuntimeClassName reference
before running the pod. For more details, see
https://kubernetes.io/docs/concepts/containers/runtime-
class/
"""
def __init__(
self,
handler: str = None,
metadata: "ObjectMeta" = None,
overhead: "Overhead" = None,
scheduling: "Scheduling" = None,
):
"""Create RuntimeClass instance."""
super(RuntimeClass, self).__init__(api_version="node/v1", kind="RuntimeClass")
self._properties = {
"handler": handler if handler is not None else "",
"metadata": metadata if metadata is not None else ObjectMeta(),
"overhead": overhead if overhead is not None else Overhead(),
"scheduling": scheduling if scheduling is not None else Scheduling(),
}
self._types = {
"apiVersion": (str, None),
"handler": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"overhead": (Overhead, None),
"scheduling": (Scheduling, None),
}
@property
def handler(self) -> str:
"""
Handler specifies the underlying runtime and configuration
that the CRI implementation will use to handle pods of this
class. The possible values are specific to the node & CRI
configuration. It is assumed that all handlers are
available on every node, and handlers of the same name are
equivalent on every node. For example, a handler called
"runc" might specify that the runc OCI runtime (using native
Linux containers) will be used to run the containers in a
pod. The Handler must be lowercase, conform to the DNS Label
(RFC 1123) requirements, and is immutable.
"""
return typing.cast(
str,
self._properties.get("handler"),
)
@handler.setter
def handler(self, value: str):
"""
Handler specifies the underlying runtime and configuration
that the CRI implementation will use to handle pods of this
class. The possible values are specific to the node & CRI
configuration. It is assumed that all handlers are
available on every node, and handlers of the same name are
equivalent on every node. For example, a handler called
"runc" might specify that the runc OCI runtime (using native
Linux containers) will be used to run the containers in a
pod. The Handler must be lowercase, conform to the DNS Label
(RFC 1123) requirements, and is immutable.
"""
self._properties["handler"] = value
@property
def metadata(self) -> "ObjectMeta":
"""
More info:
https://git.k8s.io/community/contributors/devel/sig-
architecture/api-conventions.md#metadata
"""
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
"""
More info:
https://git.k8s.io/community/contributors/devel/sig-
architecture/api-conventions.md#metadata
"""
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def overhead(self) -> "Overhead":
"""
Overhead represents the resource overhead associated with
running a pod for a given RuntimeClass. For more details,
see
https://kubernetes.io/docs/concepts/scheduling-
eviction/pod-overhead/
This field is in beta starting v1.18 and is only honored by
servers that enable the PodOverhead feature.
"""
return typing.cast(
"Overhead",
self._properties.get("overhead"),
)
@overhead.setter
def overhead(self, value: typing.Union["Overhead", dict]):
"""
Overhead represents the resource overhead associated with
running a pod for a given RuntimeClass. For more details,
see
https://kubernetes.io/docs/concepts/scheduling-
eviction/pod-overhead/
This field is in beta starting v1.18 and is only honored by
servers that enable the PodOverhead feature.
"""
if isinstance(value, dict):
value = typing.cast(
Overhead,
Overhead().from_dict(value),
)
self._properties["overhead"] = value
@property
def scheduling(self) -> "Scheduling":
"""
Scheduling holds the scheduling constraints to ensure that
pods running with this RuntimeClass are scheduled to nodes
that support it. If scheduling is nil, this RuntimeClass is
assumed to be supported by all nodes.
"""
return typing.cast(
"Scheduling",
self._properties.get("scheduling"),
)
@scheduling.setter
def scheduling(self, value: typing.Union["Scheduling", dict]):
"""
Scheduling holds the scheduling constraints to ensure that
pods running with this RuntimeClass are scheduled to nodes
that support it. If scheduling is nil, this RuntimeClass is
assumed to be supported by all nodes.
"""
if isinstance(value, dict):
value = typing.cast(
Scheduling,
Scheduling().from_dict(value),
)
self._properties["scheduling"] = value
def create_resource(self, namespace: "str" = None):
"""
Creates the RuntimeClass in the currently
configured Kubernetes cluster.
"""
names = ["create_namespaced_runtime_class", "create_runtime_class"]
_kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
def replace_resource(self, namespace: "str" = None):
"""
Replaces the RuntimeClass in the currently
configured Kubernetes cluster.
"""
names = ["replace_namespaced_runtime_class", "replace_runtime_class"]
_kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def patch_resource(self, namespace: "str" = None):
"""
Patches the RuntimeClass in the currently
configured Kubernetes cluster.
"""
names = ["patch_namespaced_runtime_class", "patch_runtime_class"]
_kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def get_resource_status(self, namespace: "str" = None):
"""This resource does not have a status."""
pass
def read_resource(self, namespace: str = None):
"""
Reads the RuntimeClass from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_runtime_class",
"read_runtime_class",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the RuntimeClass from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_runtime_class",
"delete_runtime_class",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.NodeV1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.NodeV1Api(**kwargs)
def __enter__(self) -> "RuntimeClass":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RuntimeClassList(_kuber_definitions.Collection):
"""
RuntimeClassList is a list of RuntimeClass objects.
"""
def __init__(
self,
items: typing.List["RuntimeClass"] = None,
metadata: "ListMeta" = None,
):
"""Create RuntimeClassList instance."""
super(RuntimeClassList, self).__init__(
api_version="node/v1", kind="RuntimeClassList"
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, RuntimeClass),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["RuntimeClass"]:
"""
Items is a list of schema objects.
"""
return typing.cast(
typing.List["RuntimeClass"],
self._properties.get("items"),
)
@items.setter
def items(
self, value: typing.Union[typing.List["RuntimeClass"], typing.List[dict]]
):
"""
Items is a list of schema objects.
"""
cleaned: typing.List[RuntimeClass] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
RuntimeClass,
RuntimeClass().from_dict(item),
)
cleaned.append(typing.cast(RuntimeClass, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
"""
Standard list metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-
architecture/api-conventions.md#metadata
"""
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
"""
Standard list metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-
architecture/api-conventions.md#metadata
"""
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.NodeV1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.NodeV1Api(**kwargs)
def __enter__(self) -> "RuntimeClassList":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class Scheduling(_kuber_definitions.Definition):
"""
Scheduling specifies the scheduling constraints for nodes
supporting a RuntimeClass.
"""
def __init__(
self,
node_selector: dict = None,
tolerations: typing.List["Toleration"] = None,
):
"""Create Scheduling instance."""
super(Scheduling, self).__init__(api_version="node/v1", kind="Scheduling")
self._properties = {
"nodeSelector": node_selector if node_selector is not None else {},
"tolerations": tolerations if tolerations is not None else [],
}
self._types = {
"nodeSelector": (dict, None),
"tolerations": (list, Toleration),
}
@property
def node_selector(self) -> dict:
"""
nodeSelector lists labels that must be present on nodes that
support this RuntimeClass. Pods using this RuntimeClass can
only be scheduled to a node matched by this selector. The
RuntimeClass nodeSelector is merged with a pod's existing
nodeSelector. Any conflicts will cause the pod to be
rejected in admission.
"""
return typing.cast(
dict,
self._properties.get("nodeSelector"),
)
@node_selector.setter
def node_selector(self, value: dict):
"""
nodeSelector lists labels that must be present on nodes that
support this RuntimeClass. Pods using this RuntimeClass can
only be scheduled to a node matched by this selector. The
RuntimeClass nodeSelector is merged with a pod's existing
nodeSelector. Any conflicts will cause the pod to be
rejected in admission.
"""
self._properties["nodeSelector"] = value
@property
def tolerations(self) -> typing.List["Toleration"]:
"""
tolerations are appended (excluding duplicates) to pods
running with this RuntimeClass during admission, effectively
unioning the set of nodes tolerated by the pod and the
RuntimeClass.
"""
return typing.cast(
typing.List["Toleration"],
self._properties.get("tolerations"),
)
@tolerations.setter
def tolerations(
self, value: typing.Union[typing.List["Toleration"], typing.List[dict]]
):
"""
tolerations are appended (excluding duplicates) to pods
running with this RuntimeClass during admission, effectively
unioning the set of nodes tolerated by the pod and the
RuntimeClass.
"""
cleaned: typing.List[Toleration] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
Toleration,
Toleration().from_dict(item),
)
cleaned.append(typing.cast(Toleration, item))
self._properties["tolerations"] = cleaned
def __enter__(self) -> "Scheduling":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
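if __name__ == "__main__":
    # Minimal, hypothetical usage sketch of the definitions above. The "runsc"
    # handler, the resource quantities and the node selector are illustrative
    # values, not part of this module; to_dict() is the serialization helper
    # already relied on by create_resource() above.
    example = RuntimeClass(
        handler="runsc",
        overhead=Overhead(pod_fixed={"memory": "120Mi", "cpu": "250m"}),
        scheduling=Scheduling(node_selector={"sandbox": "gvisor"}),
    )
    print(example.to_dict())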
|
the-stack_106_13954
|
from abc import abstractmethod
import numpy as np
class Survival:
"""
The survival process is implemented inheriting from this class, which selects from a population only
specific individuals to survive.
"""
def __init__(self, filter_infeasible) -> None:
super().__init__()
self.filter_infeasible = filter_infeasible
def do(self, problem, pop, n_survive, **kwargs):
# if the split should be done beforehand
        if self.filter_infeasible and problem.n_constr > 0:
feasible, infeasible = split_by_feasibility(pop, sort_infeasbible_by_cv=True)
# finally the survivors to be returned
survivors = pop.new()
# if feasible solution do exist
if len(feasible) > 0:
                survivors = self._do(problem, pop[feasible], min(len(feasible), n_survive), **kwargs)
# if infeasible solutions needs to be added
if len(survivors) < n_survive:
least_infeasible = infeasible[:n_survive - len(feasible)]
survivors = survivors.merge(pop[least_infeasible])
else:
            survivors = self._do(problem, pop, n_survive, **kwargs)
return survivors
@abstractmethod
def _do(self, problem, pop, n_survive, **kwargs):
pass
def split_by_feasibility(pop, sort_infeasbible_by_cv=True):
CV = pop.get("CV")
b = (CV <= 0)
feasible = np.where(b)[0]
infeasible = np.where(np.logical_not(b))[0]
if sort_infeasbible_by_cv:
infeasible = infeasible[np.argsort(CV[infeasible,0])]
return feasible, infeasible
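# A minimal, hypothetical sketch of a concrete survival operator built on the
# abstract class above. It assumes the framework's Population API (pop.get("F")
# returning the objective matrix, and numpy-style fancy indexing on pop), which
# is not defined in this module.
class FitnessSurvival(Survival):

    def __init__(self):
        super().__init__(filter_infeasible=True)

    def _do(self, problem, pop, n_survive, **kwargs):
        # keep the n_survive individuals with the smallest (best) first objective
        F = pop.get("F")[:, 0]
        return pop[np.argsort(F)[:n_survive]]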
|
the-stack_106_13956
|
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.components as comp
import datetime
import json
import os
dataflow_python_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/gcp/dataflow/launch_python/component.yaml')
cloudml_train_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/gcp/ml_engine/train/component.yaml')
cloudml_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/gcp/ml_engine/deploy/component.yaml')
def resnet_preprocess_op(project_id: 'GcpProject', output: 'GcsUri', staging_dir: 'GcsUri', train_csv: 'GcsUri[text/csv]',
validation_csv: 'GcsUri[text/csv]', labels, train_size: 'Integer', validation_size: 'Integer',
step_name='preprocess'):
return dataflow_python_op(
python_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/preprocess.py',
project_id=project_id,
requirements_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/requirements.txt',
staging_dir=staging_dir,
args=json.dumps([
'--train_csv', str(train_csv),
'--validation_csv', str(validation_csv),
'--labels', str(labels),
'--output_dir', str(output),
'--train_size', str(train_size),
'--validation_size', str(validation_size)
])
)
def resnet_train_op(project_id, data_dir, output: 'GcsUri', region: 'GcpRegion', depth: int, train_batch_size: int,
eval_batch_size: int, steps_per_eval: int, train_steps: int, num_train_images: int,
num_eval_images: int, num_label_classes: int, tf_version, step_name='train'):
return cloudml_train_op(
project_id=project_id,
region='us-central1',
python_module='trainer.resnet_main',
package_uris=json.dumps(
['gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/trainer/trainer-1.0.tar.gz']),
job_dir=output,
args=json.dumps([
'--data_dir', str(data_dir),
'--model_dir', str(output),
'--use_tpu', 'True',
'--resnet_depth', str(depth),
'--train_batch_size', str(train_batch_size),
'--eval_batch_size', str(eval_batch_size),
'--steps_per_eval', str(steps_per_eval),
'--train_steps', str(train_steps),
'--num_train_images', str(num_train_images),
'--num_eval_images', str(num_eval_images),
'--num_label_classes', str(num_label_classes),
'--export_dir', '{}/export'.format(str(output))
]),
runtime_version=tf_version,
training_input=json.dumps({
'scaleTier': 'BASIC_TPU'
})
)
def resnet_deploy_op(model_dir, model, version, project_id: 'GcpProject', region: 'GcpRegion',
tf_version, step_name='deploy'):
# TODO(hongyes): add region to model payload.
return cloudml_deploy_op(
model_uri=model_dir,
project_id=project_id,
model_id=model,
version_id=version,
runtime_version=tf_version,
replace_existing_version='True'
)
@dsl.pipeline(
name='ResNet_Train_Pipeline',
description='Demonstrate the ResNet50 predict.'
)
def resnet_train(
project_id,
output,
region='us-central1',
model='bolts',
version='beta1',
tf_version='1.12',
train_csv='gs://bolts_image_dataset/bolt_images_train.csv',
validation_csv='gs://bolts_image_dataset/bolt_images_validate.csv',
labels='gs://bolts_image_dataset/labels.txt',
depth=50,
train_batch_size=1024,
eval_batch_size=1024,
steps_per_eval=250,
train_steps=10000,
num_train_images=218593,
num_eval_images=54648,
num_label_classes=10):
output_dir = os.path.join(str(output), '{{workflow.name}}')
preprocess_staging = os.path.join(output_dir, 'staging')
preprocess_output = os.path.join(output_dir, 'preprocessed_output')
train_output = os.path.join(output_dir, 'model')
preprocess = resnet_preprocess_op(project_id, preprocess_output, preprocess_staging, train_csv,
validation_csv, labels, train_batch_size, eval_batch_size).apply(gcp.use_gcp_secret())
train = resnet_train_op(project_id, preprocess_output, train_output, region, depth, train_batch_size,
eval_batch_size, steps_per_eval, train_steps, num_train_images, num_eval_images,
num_label_classes, tf_version).apply(gcp.use_gcp_secret())
train.after(preprocess)
export_output = os.path.join(str(train.outputs['job_dir']), 'export')
deploy = resnet_deploy_op(export_output, model, version, project_id, region,
tf_version).apply(gcp.use_gcp_secret())
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(resnet_train, __file__ + '.zip')
|
the-stack_106_13957
|
######################################################################################################################
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import boto3
from botocore.client import Config
from xml.dom import minidom
import os
import logging
import datetime, sys, json, urllib2, urllib, re
log_level = str(os.environ.get('LOG_LEVEL')).upper()
if log_level not in ['DEBUG', 'INFO','WARNING', 'ERROR','CRITICAL']:
log_level = 'ERROR'
log = logging.getLogger()
log.setLevel(log_level)
bucket_name=str(os.environ.get('BUCKET_NAME'))
bucket_prefix=str(os.environ.get('BUCKET_PREFIX'))
config_file=str(os.environ.get('CONFIG_FILE'))
EIP=str(os.environ.get('EIP'))
PIP=str(os.environ.get('PIP'))
HUB_TAG=str(os.environ.get('HUB_TAG'))
HUB_TAG_VALUE=str(os.environ.get('HUB_TAG_VALUE'))
BGP_ASN=int(os.environ.get('BGP_ASN'))
UUID=''
LOG_LEVEL='INFO'
#VGW tags come in the format of [{"Key": "Tag1", "Value":"Tag1value"},{"Key":"Tag2","Value":"Tag2value"}]
#This function converts the array of Key/Value dicts to a single tag dictionary
def getTags(vgwTags):
tags = {}
for subTag in vgwTags:
tags[subTag['Key']] = subTag['Value']
return tags
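#Example (illustrative): getTags([{'Key': 'Name', 'Value': 'spoke-vgw'},
#                                 {'Key': 'transitvpc:spoke', 'Value': 'true'}])
#returns {'Name': 'spoke-vgw', 'transitvpc:spoke': 'true'}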
#This function adds a <transit_vpc_config /> block to an existing XML doc and returns the new XML
def updateConfigXML(xml, vgwTags, account_id,spoke_subnet,csr_number):
xmldoc=minidom.parseString(xml)
#Create TransitVPC config xml block
transitConfig= xmldoc.createElement("transit_vpc_config")
#Create Account ID xml block
newXml = xmldoc.createElement("account_id")
newXml.appendChild(xmldoc.createTextNode(account_id))
transitConfig.appendChild(newXml)
#Create VPN Endpoint xml block
newXml = xmldoc.createElement("vpn_endpoint")
newXml.appendChild(xmldoc.createTextNode(csr_number))
transitConfig.appendChild(newXml)
#create VPN Spoke subnet xml block
newXml = xmldoc.createElement("spoke_subnet")
newXml.appendChild(xmldoc.createTextNode(spoke_subnet))
transitConfig.appendChild(newXml)
#create Transit IPSec local INT xml block
newXml = xmldoc.createElement("customer_local_ip")
newXml.appendChild(xmldoc.createTextNode(PIP))
transitConfig.appendChild(newXml)
#Create status xml block (create = tagged to create spoke, delete = tagged as spoke, but not with the correct spoke tag value)
newXml = xmldoc.createElement("status")
if vgwTags[HUB_TAG] == HUB_TAG_VALUE:
newXml.appendChild(xmldoc.createTextNode("create"))
else:
newXml.appendChild(xmldoc.createTextNode("delete"))
transitConfig.appendChild(newXml)
#Add transit config to XML
xmldoc.childNodes[0].appendChild(transitConfig)
return str(xmldoc.toxml())
def lambda_handler(event, context):
#Figure out the account number by parsing this function's ARN
account_id = re.findall(':(\d+):', context.invoked_function_arn)[0]
#Retrieve Transit VPC configuration from transit_vpn_config.txt
s3=boto3.client('s3', config=Config(signature_version='s3v4'))
log.info('Getting config file %s/%s%s',bucket_name, bucket_prefix)
log.info('Retrieved IP of transit VPN gateways: %s, %s',EIP)
# use this variable to determine if a VGW has been processed so we will only process one VGW per run (one per minute)
processed_vgw = False
#Get list of regions so poller can look for VGWs in all regions
ec2=boto3.client('ec2',region_name='us-east-1')
regions=ec2.describe_regions()
for region in regions['Regions']:
#Get region name for the current region
region_id=region['RegionName']
log.debug('Checking region: %s',region_id)
#Create EC2 connection to this region to get list of VGWs
ec2=boto3.client('ec2',region_name=region_id)
#Get list of all VGWs in the region
vgws=ec2.describe_vpn_gateways(Filters=[
{'Name':'state','Values':['available', 'attached', 'detached']}
])
#Get list of Transit VPC tagged VPN connections in the region as well
vpns=ec2.describe_vpn_connections(Filters=[
{'Name':'state','Values':['available','pending','deleting']},
{'Name':'tag:'+ HUB_TAG,'Values':[HUB_TAG_VALUE] }
])
#Process all the VGWs in the region
for vgw in vgws['VpnGateways']:
#Check to see if the VGW has tags, if not, then we should skip it
if vgw.get('Tags', '') == '':
continue
#Put all of the VGW tags into a dict for easier processing
vgwTags = getTags(vgw['Tags'])
#Configure HUB_TAG if it is not set already (for untagged VGWs)
vgwTags[HUB_TAG] = vgwTags.get( HUB_TAG, '')
#Determine if VGW is tagged as a spoke
spoke_vgw = False
if vgwTags[HUB_TAG] == HUB_TAG_VALUE:
spoke_vgw = True
#Check to see if the VGW already has Transit VPC VPN Connections
vpn_existing=False
for vpn in vpns['VpnConnections']:
if vpn['VpnGatewayId']==vgw['VpnGatewayId']:
vpn_existing=True
break
#Need to create VPN connections if this is a spoke VGW and no VPN connections already exist
if spoke_vgw and not vpn_existing:
log.info('Found a new VGW (%s) which needs VPN connections.', vgw['VpnGatewayId'])
#Create Customer Gateways (will create CGWs if they do not exist, otherwise, the API calls are ignored)
                log.debug('Creating Customer Gateway with IP %s', EIP)
cg1=ec2.create_customer_gateway(Type='ipsec.1',PublicIp=EIP,BgpAsn=BGP_ASN)
ec2.create_tags(Resources=[cg1['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Name','Value': 'Transit VPC Endpoint1' }])
                log.info('Created Customer Gateway: %s', cg1['CustomerGateway']['CustomerGatewayId'])
#Create and tag first VPN connection
vpn1=ec2.create_vpn_connection(Type='ipsec.1',CustomerGatewayId=cg1['CustomerGateway']['CustomerGatewayId'],VpnGatewayId=vgw['VpnGatewayId'],Options={'StaticRoutesOnly':True})
ec2.create_tags(Resources=[vpn1['VpnConnection']['VpnConnectionId']],
Tags=[
{'Key': 'Name','Value': vgw['VpnGatewayId']+'-to-Transit-VPC CSR1' },
{'Key': HUB_TAG,'Value': HUB_TAG_VALUE },
{'Key': 'transitvpc:endpoint','Value': 'CSR1' }
])
                log.info('Created VPN connection: %s', vpn1['VpnConnection']['VpnConnectionId'])
#Retrieve VPN configuration
vpn_config1=ec2.describe_vpn_connections(VpnConnectionIds=[vpn1['VpnConnection']['VpnConnectionId']])
vpn_config1=vpn_config1['VpnConnections'][0]['CustomerGatewayConfiguration']
# Spoke subnet
spoke_subnet=ec2.describe_vpcs(VpcIds=[vgw['VpcAttachments'][0]['VpcId']])['Vpcs'][0]['CidrBlock']
#Update VPN configuration XML with transit VPC specific configuration info for this connection
vpn_config1=updateConfigXML(vpn_config1, vgwTags, account_id, spoke_subnet, 'CSR1')
#Put CSR1 config in S3
s3.put_object(
Body=str.encode(vpn_config1),
Bucket=bucket_name,
Key=bucket_prefix+'CSR1/'+region_id+'-'+vpn1['VpnConnection']['VpnConnectionId']+'.conf',
ACL='bucket-owner-full-control',
)
log.debug('Pushed VPN configurations to S3...')
processed_vgw = True
#Need to delete VPN connections if this is no longer a spoke VPC (tagged for spoke, but tag != spoke tag value) but Transit VPC connections exist
if not spoke_vgw and vpn_existing:
log.info('Found old VGW (%s) with VPN connections to remove.', vgw['VpnGatewayId'])
#We need to go through the region's VPN connections to find the ones to delete
for vpn in vpns['VpnConnections']:
if vpn['VpnGatewayId']==vgw['VpnGatewayId']:
#Put the VPN tags into a dict for easier processing
vpnTags = getTags(vpn['Tags'])
if vpnTags['transitvpc:endpoint'] == 'CSR1':
csrNum = '1'
else:
csrNum = '2'
#Need to get VPN configuration to remove from CSR
vpn_config=vpn['CustomerGatewayConfiguration']
# Spoke subnet
spoke_subnet=ec2.describe_vpcs(VpcIds=[vgw['VpcAttachments'][0]['VpcId']])['Vpcs'][0]['CidrBlock']
#Update VPN configuration XML with transit VPC specific configuration info for this connection
vpn_config=updateConfigXML(vpn_config, vgwTags, account_id, spoke_subnet, vpnTags['transitvpc:endpoint'])
s3.put_object(
Body=str.encode(vpn_config),
Bucket=bucket_name,
Key=bucket_prefix+'CSR'+csrNum+'/'+region_id+'-'+vpn['VpnConnectionId']+'.conf',
ACL='bucket-owner-full-control',
)
log.debug('Pushed CSR%s configuration to S3.', csrNum)
#now we need to delete the VPN connection
ec2.delete_vpn_connection(VpnConnectionId=vpn['VpnConnectionId'])
log.info('Deleted VPN connection (%s) to CSR%s', vpn['VpnConnectionId'], csrNum)
#Attempt to clean up the CGW. This will only succeed if the CGW has no VPN connections are deleted
try:
ec2.delete_customer_gateway(CustomerGatewayId=vpn['CustomerGatewayId'])
log.info("Cleaned up %s since it has no VPN connections left", vpn['CustomerGatewayId'])
except:
log.debug("%s still has existing VPN connections", vpn['CustomerGatewayId'])
# if a VGW has been processed, then we need to break out of VGW processing
if processed_vgw:
break
# if a VGW has been processed, then we need to break out of region processing
if processed_vgw:
break
|
the-stack_106_13958
|
import logging
import sys
import ply.lex
from jsonpath_ng.exceptions import JsonPathLexerError
logger = logging.getLogger(__name__)
class JsonPathLexer(object):
'''
A Lexical analyzer for JsonPath.
'''
def __init__(self, debug=False):
self.debug = debug
        if self.__doc__ is None:
raise JsonPathLexerError(
'Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
def tokenize(self, string):
'''
Maps a string to an iterator over tokens. In other words: [char] -> [token]
'''
new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(string)
while True:
t = new_lexer.token()
if t is None: break
t.col = t.lexpos - new_lexer.latest_newline
yield t
if new_lexer.string_value is not None:
raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
# ============== PLY Lexer specification ==================
#
# This probably should be private but:
# - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
# - things like `literals` might be a legitimate part of the public interface.
#
# Anyhow, it is pythonic to give some rope to hang oneself with :-)
literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&', '~']
reserved_words = {'where': 'WHERE'}
tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())
states = [('singlequote', 'exclusive'),
('doublequote', 'exclusive'),
('backquote', 'exclusive')]
# Normal lexing, rather easy
t_DOUBLEDOT = r'\.\.'
t_ignore = ' \t'
def t_ID(self, t):
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
t.type = self.reserved_words.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r'-?\d+'
t.value = int(t.value)
return t
# Single-quoted strings
t_singlequote_ignore = ''
def t_singlequote(self, t):
r"'"
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('singlequote')
def t_singlequote_content(self, t):
r"[^'\\]+"
t.lexer.string_value += t.value
def t_singlequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_singlequote_end(self, t):
r"'"
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_singlequote_error(self, t):
raise JsonPathLexerError(
'Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (
t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Double-quoted strings
t_doublequote_ignore = ''
def t_doublequote(self, t):
r'"'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('doublequote')
def t_doublequote_content(self, t):
r'[^"\\]+'
t.lexer.string_value += t.value
def t_doublequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_doublequote_end(self, t):
r'"'
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_doublequote_error(self, t):
raise JsonPathLexerError(
'Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (
t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Back-quoted "magic" operators
t_backquote_ignore = ''
def t_backquote(self, t):
r'`'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('backquote')
def t_backquote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_backquote_content(self, t):
r"[^`\\]+"
t.lexer.string_value += t.value
def t_backquote_end(self, t):
r'`'
t.value = t.lexer.string_value
t.type = 'NAMED_OPERATOR'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_backquote_error(self, t):
raise JsonPathLexerError(
'Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (
t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Counting lines, handling errors
def t_newline(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos
def t_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (
t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
if __name__ == '__main__':
logging.basicConfig()
lexer = JsonPathLexer(debug=True)
for token in lexer.tokenize(sys.stdin.read()):
print('%-20s%s' % (token.value, token.type))
|
the-stack_106_13959
|
"""
ELF (Unix/BSD executable file format) parser.
Author: Victor Stinner, Robert Xiao
Creation date: 08 may 2006
"""
from hachoir.parser import HachoirParser
from hachoir.field import (RootSeekableFieldSet, FieldSet, Bit, NullBits, RawBits,
UInt8, UInt16, UInt32, UInt64, Enum,
String, RawBytes, Bytes)
from hachoir.core.text_handler import textHandler, hexadecimal
from hachoir.core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class ElfHeader(FieldSet):
LITTLE_ENDIAN_ID = 1
BIG_ENDIAN_ID = 2
MACHINE_NAME = {
# e_machine, EM_ defines
0: "No machine",
1: "AT&T WE 32100",
2: "SPARC",
3: "Intel 80386",
4: "Motorola 68000",
5: "Motorola 88000",
6: "Intel 80486",
7: "Intel 80860",
8: "MIPS I Architecture",
9: "Amdahl UTS on System/370",
10: "MIPS RS3000 Little-endian",
11: "IBM RS/6000 XXX reserved",
15: "Hewlett-Packard PA-RISC",
16: "NCube XXX reserved",
17: "Fujitsu VPP500",
18: "Enhanced instruction set SPARC",
19: "Intel 80960",
20: "PowerPC 32-bit",
21: "PowerPC 64-bit",
36: "NEC V800",
37: "Fujitsu FR20",
38: "TRW RH-32",
39: "Motorola RCE",
40: "Advanced RISC Machines (ARM)",
41: "DIGITAL Alpha",
42: "Hitachi Super-H",
43: "SPARC Version 9",
44: "Siemens Tricore",
45: "Argonaut RISC Core",
46: "Hitachi H8/300",
47: "Hitachi H8/300H",
48: "Hitachi H8S",
49: "Hitachi H8/500",
50: "Intel Merced (IA-64) Processor",
51: "Stanford MIPS-X",
52: "Motorola Coldfire",
53: "Motorola MC68HC12",
62: "Advanced Micro Devices x86-64",
75: "DIGITAL VAX",
36902: "used by NetBSD/alpha; obsolete",
}
CLASS_NAME = {
# e_ident[EI_CLASS], ELFCLASS defines
1: "32 bits",
2: "64 bits"
}
TYPE_NAME = {
# e_type, ET_ defines
0: "No file type",
1: "Relocatable file",
2: "Executable file",
3: "Shared object file",
4: "Core file",
0xFF00: "Processor-specific (0xFF00)",
0xFFFF: "Processor-specific (0xFFFF)",
}
OSABI_NAME = {
# e_ident[EI_OSABI], ELFOSABI_ defines
0: "UNIX System V ABI",
1: "HP-UX operating system",
2: "NetBSD",
3: "GNU/Linux",
4: "GNU/Hurd",
5: "86Open common IA32 ABI",
6: "Solaris",
7: "Monterey",
8: "IRIX",
9: "FreeBSD",
10: "TRU64 UNIX",
11: "Novell Modesto",
12: "OpenBSD",
97: "ARM",
255: "Standalone (embedded) application",
}
ENDIAN_NAME = {
# e_ident[EI_DATA], ELFDATA defines
LITTLE_ENDIAN_ID: "Little endian",
BIG_ENDIAN_ID: "Big endian",
}
def createFields(self):
yield Bytes(self, "signature", 4, r'ELF signature ("\x7fELF")')
yield Enum(UInt8(self, "class", "Class"), self.CLASS_NAME)
if self["class"].value == 1:
ElfLongWord = UInt32
else:
ElfLongWord = UInt64
yield Enum(UInt8(self, "endian", "Endian"), self.ENDIAN_NAME)
yield UInt8(self, "file_version", "File version")
yield Enum(UInt8(self, "osabi_ident", "OS/syscall ABI identification"), self.OSABI_NAME)
yield UInt8(self, "abi_version", "syscall ABI version")
yield String(self, "pad", 7, "Pad")
yield Enum(UInt16(self, "type", "File type"), self.TYPE_NAME)
yield Enum(UInt16(self, "machine", "Machine type"), self.MACHINE_NAME)
yield UInt32(self, "version", "ELF format version")
yield textHandler(ElfLongWord(self, "entry", "Entry point"), hexadecimal)
yield ElfLongWord(self, "phoff", "Program header file offset")
yield ElfLongWord(self, "shoff", "Section header file offset")
yield UInt32(self, "flags", "Architecture-specific flags")
yield UInt16(self, "ehsize", "Elf header size (this header)")
yield UInt16(self, "phentsize", "Program header entry size")
yield UInt16(self, "phnum", "Program header entry count")
yield UInt16(self, "shentsize", "Section header entry size")
yield UInt16(self, "shnum", "Section header entry count")
yield UInt16(self, "shstrndx", "Section header string table index")
def isValid(self):
if self["signature"].value != b"\x7FELF":
return "Wrong ELF signature"
if self["class"].value not in self.CLASS_NAME:
return "Unknown class"
if self["endian"].value not in self.ENDIAN_NAME:
return "Unknown endian (%s)" % self["endian"].value
return ""
class SectionFlags(FieldSet):
def createFields(self):
if self.root.endian == BIG_ENDIAN:
if self.root.is64bit:
yield RawBits(self, "reserved[]", 32)
yield RawBits(self, "processor_specific", 4, "Processor specific flags")
yield NullBits(self, "reserved[]", 17)
yield Bit(self, "is_tls", "Section contains TLS data?")
yield NullBits(self, "reserved[]", 7)
yield Bit(self, "is_exec", "Section contains executable instructions?")
yield Bit(self, "is_alloc", "Section occupies memory?")
yield Bit(self, "is_writable", "Section contains writable data?")
else:
yield Bit(self, "is_writable", "Section contains writable data?")
yield Bit(self, "is_alloc", "Section occupies memory?")
yield Bit(self, "is_exec", "Section contains executable instructions?")
yield NullBits(self, "reserved[]", 7)
yield Bit(self, "is_tls", "Section contains TLS data?")
yield RawBits(self, "processor_specific", 4, "Processor specific flags")
yield NullBits(self, "reserved[]", 17)
if self.root.is64bit:
yield RawBits(self, "reserved[]", 32)
class SymbolStringTableOffset(UInt32):
def createDisplay(self):
section_index = self['/header/shstrndx'].value
section = self['/section[' + str(section_index) + ']']
text = section.value[self.value:]
return text.split('\0', 1)[0]
class SectionHeader32(FieldSet):
static_size = 40 * 8
TYPE_NAME = {
# sh_type, SHT_ defines
0: "Inactive",
1: "Program defined information",
2: "Symbol table section",
3: "String table section",
4: "Relocation section with addends",
5: "Symbol hash table section",
6: "Dynamic section",
7: "Note section",
8: "Block started by symbol (BSS) or No space section",
9: "Relocation section without addends",
10: "Reserved - purpose unknown",
11: "Dynamic symbol table section",
}
def createFields(self):
yield SymbolStringTableOffset(self, "name", "Section name (index into section header string table)")
yield Enum(textHandler(UInt32(self, "type", "Section type"), hexadecimal), self.TYPE_NAME)
yield SectionFlags(self, "flags", "Section flags")
yield textHandler(UInt32(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt32(self, "LMA", "Logical memory address (offset in file)"), hexadecimal)
yield textHandler(UInt32(self, "size", "Section size (bytes)"), hexadecimal)
yield UInt32(self, "link", "Index of a related section")
yield UInt32(self, "info", "Type-dependent information")
yield UInt32(self, "addr_align", "Address alignment (bytes)")
yield UInt32(self, "entry_size", "Size of each entry in section")
def createDescription(self):
return "Section header (name: %s, type: %s)" % \
(self["name"].display, self["type"].display)
class SectionHeader64(SectionHeader32):
static_size = 64 * 8
def createFields(self):
yield SymbolStringTableOffset(self, "name", "Section name (index into section header string table)")
yield Enum(textHandler(UInt32(self, "type", "Section type"), hexadecimal), self.TYPE_NAME)
yield SectionFlags(self, "flags", "Section flags")
yield textHandler(UInt64(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt64(self, "LMA", "Logical memory address (offset in file)"), hexadecimal)
yield textHandler(UInt64(self, "size", "Section size (bytes)"), hexadecimal)
yield UInt32(self, "link", "Index of a related section")
yield UInt32(self, "info", "Type-dependent information")
yield UInt64(self, "addr_align", "Address alignment (bytes)")
yield UInt64(self, "entry_size", "Size of each entry in section")
class ProgramFlags(FieldSet):
static_size = 32
FLAGS = (('pf_r', 'readable'), ('pf_w', 'writable'), ('pf_x', 'executable'))
def createFields(self):
if self.root.endian == BIG_ENDIAN:
yield NullBits(self, "padding[]", 29)
for fld, desc in self.FLAGS:
yield Bit(self, fld, "Segment is " + desc)
else:
for fld, desc in reversed(self.FLAGS):
yield Bit(self, fld, "Segment is " + desc)
yield NullBits(self, "padding[]", 29)
def createDescription(self):
attribs = []
for fld, desc in self.FLAGS:
if self[fld].value:
attribs.append(desc)
return 'Segment is ' + ', '.join(attribs)
class ProgramHeader32(FieldSet):
TYPE_NAME = {
# p_type, PT_ defines
0: "Unused program header table entry",
1: "Loadable program segment",
2: "Dynamic linking information",
3: "Program interpreter",
4: "Auxiliary information",
5: "Reserved, unspecified semantics",
6: "Entry for header table itself",
7: "Thread Local Storage segment",
0x70000000: "MIPS_REGINFO",
}
static_size = 32 * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Segment type"), ProgramHeader32.TYPE_NAME)
yield UInt32(self, "offset", "Offset")
yield textHandler(UInt32(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt32(self, "paddr", "P. address"), hexadecimal)
yield UInt32(self, "file_size", "File size")
yield UInt32(self, "mem_size", "Memory size")
yield ProgramFlags(self, "flags")
yield UInt32(self, "align", "Alignment padding")
def createDescription(self):
return "Program Header (%s)" % self["type"].display
class ProgramHeader64(ProgramHeader32):
static_size = 56 * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Segment type"), ProgramHeader32.TYPE_NAME)
yield ProgramFlags(self, "flags")
yield UInt64(self, "offset", "Offset")
yield textHandler(UInt64(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt64(self, "paddr", "P. address"), hexadecimal)
yield UInt64(self, "file_size", "File size")
yield UInt64(self, "mem_size", "Memory size")
yield UInt64(self, "align", "Alignment padding")
class ElfFile(HachoirParser, RootSeekableFieldSet):
MAGIC = b"\x7FELF"
PARSER_TAGS = {
"id": "elf",
"category": "program",
"file_ext": ("so", ""),
"min_size": 52 * 8, # At least one program header
"mime": (
"application/x-executable",
"application/x-object",
"application/x-sharedlib",
"application/x-executable-file",
"application/x-coredump"),
"magic": ((b"\x7FELF", 0),),
"description": "ELF Unix/BSD program/library"
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(
self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
err = self["header"].isValid()
if err:
return err
return True
def createFields(self):
# Choose the right endian depending on endian specified in header
if self.stream.readBits(5 * 8, 8, BIG_ENDIAN) == ElfHeader.BIG_ENDIAN_ID:
self.endian = BIG_ENDIAN
else:
self.endian = LITTLE_ENDIAN
# Parse header and program headers
yield ElfHeader(self, "header", "Header")
self.is64bit = (self["header/class"].value == 2)
for index in range(self["header/phnum"].value):
if self.is64bit:
yield ProgramHeader64(self, "prg_header[]")
else:
yield ProgramHeader32(self, "prg_header[]")
self.seekByte(self["header/shoff"].value, relative=False)
for index in range(self["header/shnum"].value):
if self.is64bit:
yield SectionHeader64(self, "section_header[]")
else:
yield SectionHeader32(self, "section_header[]")
for index in range(self["header/shnum"].value):
field = self["section_header[" + str(index) + "]"]
if field['size'].value != 0:
self.seekByte(field['LMA'].value, relative=False)
yield RawBytes(self, "section[" + str(index) + "]", field['size'].value)
def createDescription(self):
return "ELF Unix/BSD program/library: %s" % (
self["header/class"].display)
|
the-stack_106_13960
|
import requests
import sys
from functools import wraps
import inspect
from json import loads
class IEX(object):
"""
Base class for IEX API
"""
_IEX_API_URL = "https://api.iextrading.com/1.0"
def __init__(self, retries=5):
"""
Initialize the class
retries: Maximum amount of retries in case of faulty connection or
server not able to answer the call.
"""
self.retries = retries
def _retry(func):
"""
Decorator for retrying api calls
func: The function to be retried
"""
@wraps(func)
def _retry_wrapper(self, *args, **kwargs):
error_message = ""
for retry in range(self.retries + 1):
try:
return func(self, *args, **kwargs)
except ValueError as err:
error_message = str(err)
raise ValueError(str(error_message))
return _retry_wrapper
@classmethod
def _get_args(cls, func):
"""
Get the args of the function
func: The function to be used
"""
        argspec = inspect.getargspec(func)
        defaults = {}
        if argspec.defaults:
            # Default values align with the trailing positional arguments
            first_default = len(argspec.args) - len(argspec.defaults)
            defaults = dict(zip(argspec.args[first_default:], argspec.defaults))
        return argspec, defaults
@classmethod
def _call_api_on_func(cls, func):
argspec, defaults = cls._get_args(func)
@wraps(func)
def _call_wrapper(self, *args, **kwargs):
function_names = func(self, *args, **kwargs)
url = "{}/{}?".format(IEX._IEX_API_URL, '/'.join(function_names))
for idx, arg_name in enumerate(argspec.args[1:]):
if arg_name in defaults:
try:
arg_value = kwargs[arg_name]
except KeyError:
arg_value = defaults[arg_name]
if arg_value:
if isinstance(arg_value, tuple) or isinstance(arg_value, list):
arg_value = ','.join([str(v) for v in arg_value])
url = "{}{}={}&".format(url, arg_name, arg_value)
url = url[:-1]
return self._handle_api_call(url)
return _call_wrapper
@_retry
def _handle_api_call(self, url):
"""
Handle the return call from the api and return a data object.
It raises a ValueError on problems.
url: The url of the service
"""
response = requests.get(url)
return response.json()
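# --- Usage sketch (added for illustration) ---------------------------------
# ``_call_api_on_func`` expects the wrapped method to return the path
# components of an endpoint; keyword arguments that carry defaults are then
# appended as query parameters. The ``Stock`` subclass and the
# ``/stock/<symbol>/quote`` endpoint below are illustrative only, the legacy
# api.iextrading.com host may no longer answer, and ``inspect.getargspec``
# (used above) is unavailable on Python 3.11+.
class Stock(IEX):

    @IEX._call_api_on_func
    def get_quote(self, symbol, displayPercent=None):
        # Becomes ".../stock/<symbol>/quote"; ``displayPercent`` is added as
        # "?displayPercent=..." only when a value is supplied.
        return ["stock", symbol, "quote"]


if __name__ == "__main__":
    print(Stock().get_quote("AAPL", displayPercent=True))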
|
the-stack_106_13961
|
from ..util import *
import re
def get_kmer_count_dict(dna, k, precision='exact', d=None):
"""
Creates a dictionary of counts for each k-mer in a string.
:param dna: a string of DNA
:param k: the length of each substring
:param precision: how much precision is required in obtaining kmer_count
exact: exact string matches
mismatch: string matches with d or fewer differences
reverse: string matches reverse complement as well
loose: includes mismatch and reverse options
:param d: optional param indicating maximum number of mismatches (Hamming distance)
:return: a dictionary of each k-mer and the number of times it appears, the largest count
"""
k_mers = dict()
largest_count = 0
    # Count exact occurrences first; this baseline is shared by all precision levels.
    # The range extends to len(dna) - k + 1 so the final k-mer is included.
    for i in range(0, len(dna) - k + 1):
        seq = dna[i:i + k]
if seq in k_mers:
k_mers[seq] += 1
if k_mers[seq] > largest_count:
largest_count = k_mers[seq]
else:
k_mers[seq] = 1
if precision == 'mismatch' or precision == 'loose':
for k_mer in k_mers.keys():
indexes = find_approximate_match_indexes(dna, k_mer, d)
k_mers[k_mer] = len(indexes)
if k_mers[k_mer] > largest_count:
largest_count = k_mers[k_mer]
if precision == 'reverse' or precision == 'loose':
new_kmers = dict()
for k_mer in k_mers.keys():
rc_kmer = reverse_complement(k_mer)
# Only add if reverse complement isn't already in new dictionary
if rc_kmer not in new_kmers:
# Determine if reverse complement is also present in the DNA
if rc_kmer in k_mers:
# Add reverse complement count to current count
total_count = k_mers[k_mer] + k_mers[rc_kmer]
else:
total_count = k_mers[k_mer]
# Add to new list of k-mers
new_kmers[k_mer] = total_count
if new_kmers[k_mer] > largest_count:
largest_count = new_kmers[k_mer]
k_mers = new_kmers
# Return dictionary of existing k-mers and their counts
return k_mers, largest_count
def find_pattern_clumps(dna, k, L, t):
"""
Find all patterns forming (L, t)-clumps in a given sequence of DNA.
:param dna: a string of DNA
:param k: the length of each k-mer
:param L: the size of the window determining clumps
:param t: the number of required occurrences
:return: a list of patterns that meet the clumping requirements
"""
# Create dictionary of k-mers
k_mers = dict()
    for i in range(len(dna) - k + 1):  # include the final k-mer
seq = dna[i:i + k]
if seq in k_mers:
k_mers[seq] += 1
else:
k_mers[seq] = 1
# Get list of more common k_mers
sequences = set()
for sequence, frequency in k_mers.items():
if frequency >= t:
sequences.add(sequence)
# Create list of indexes for each common k_mer
k_mer_indexes = dict()
for pattern in sequences:
pattern_lookup = '(?=' + pattern + ')' # to find overlapping matches
indexes = [s.start() for s in re.finditer(pattern_lookup, dna)]
k_mer_indexes[pattern] = indexes
# Look for clumps
sequences.clear()
for pattern in k_mer_indexes:
indexes = k_mer_indexes.get(pattern)
for i in range(0, len(indexes) - t + 1):
if indexes[i + t - 1] + k - indexes[i] <= L: # make sure it completely fits
                sequences.add(pattern)
    return sequences
def find_minimum_skew(dna, save_skew=False, file_name=None):
"""
Generates a list of indexes with the minimum G - C skew.
:param dna: a string of DNA
:param save_skew: whether to save the skew list to a file
:param file_name: the file to save the skew to
:return: a list of DNA indexes with the lowest skew
"""
skew = list()
current_skew = 0
lowest_skew = 0
skew.append(current_skew)
# Find the skew for each position
for i in range(len(dna)):
if dna[i] == 'C':
current_skew -= 1
elif dna[i] == 'G':
current_skew += 1
skew.append(current_skew)
if lowest_skew > current_skew:
lowest_skew = current_skew
if save_skew:
# Save skew to file
with open(file_name, 'w') as s:
for value in skew:
s.write("{},".format(value))
# Find the indexes with the lowest skew
minimum_indexes = list()
for i in range(len(skew)):
if skew[i] == lowest_skew:
minimum_indexes.append(i)
return minimum_indexes
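# --- Usage sketch (added for illustration) ---------------------------------
# A small demonstration of the functions above on a toy sequence. Only the
# 'exact' precision mode is exercised so the example does not depend on the
# helpers imported from ``..util`` (reverse_complement,
# find_approximate_match_indexes); the sequence and parameters are arbitrary.
if __name__ == "__main__":
    toy_dna = "ACGTTGCATGTCGCATGATGCATGAGAGCT"

    counts, top = get_kmer_count_dict(toy_dna, 4, precision='exact')
    most_frequent = [kmer for kmer, n in counts.items() if n == top]
    print("most frequent 4-mers:", most_frequent, "count:", top)

    print("(25, 2)-clump patterns:", find_pattern_clumps(toy_dna, 4, 25, 2))
    print("minimum skew indexes:", find_minimum_skew(toy_dna))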
|
the-stack_106_13962
|
from models.tridentnet.builder import TridentFasterRcnn as Detector
from models.tridentnet.builder_v2 import TridentResNetV1bC4 as Backbone
from models.tridentnet.builder import TridentRpnHead as RpnHead
from models.tridentnet.builder import process_branch_outputs, process_branch_rpn_outputs
from symbol.builder import Neck
from symbol.builder import RoiAlign as RoiExtractor
from symbol.builder import BboxC5V1Head as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 1 if is_train else 1
fp16 = False
class Trident:
num_branch = 3 if is_train else 1
train_scaleaware = False
test_scaleaware = False
branch_ids = range(num_branch) if is_train else [1]
branch_dilates = [1, 2, 3] if is_train else [2]
valid_ranges = [(0, -1), (0, -1), (0, -1)] if is_train else [(0, -1)]
valid_ranges_on_origin = True
branch_bn_shared = True
branch_conv_shared = True
branch_deform = False
assert num_branch == len(branch_ids)
assert num_branch == len(valid_ranges)
class KvstoreParam:
kvstore = "local"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=len(KvstoreParam.gpus))
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 152
num_branch = Trident.num_branch
branch_ids = Trident.branch_ids
branch_dilates = Trident.branch_dilates
branch_bn_shared = Trident.branch_bn_shared
branch_conv_shared = Trident.branch_conv_shared
branch_deform = Trident.branch_deform
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image * Trident.num_branch
class anchor_generate:
scale = (2, 4, 8, 16, 32)
ratio = (0.5, 1.0, 2.0)
stride = 16
image_anchor = 256
class head:
conv_channel = 512
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 12000 if is_train else 6000
post_nms_top_n = 500 if is_train else 300
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = True
image_roi = 128
fg_fraction = 0.5
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 2
class_agnostic = True
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 128
batch_image = General.batch_image * Trident.num_branch
class regress_target:
class_agnostic = True
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = 16
class DatasetParam:
if is_train:
image_set = ("coco_train2014", "coco_valminusminival2014")
else:
image_set = ("coco_minival2014", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head,
num_branch=Trident.num_branch, scaleaware=Trident.train_scaleaware)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head, Trident.num_branch)
test_sym = detector.get_test_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head, num_branch=Trident.num_branch)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 5
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.0
iter = 3000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)
class TestParam:
min_det_score = 0.001
max_det_per_image = 100
process_roidb = lambda x: x
if Trident.test_scaleaware:
process_output = lambda x, y: process_branch_outputs(
x, Trident.num_branch, Trident.valid_ranges, Trident.valid_ranges_on_origin)
else:
process_output = lambda x, y: x
process_rpn_output = lambda x, y: process_branch_rpn_outputs(x, Trident.num_branch)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1200 if is_train else 2000
class PadParam:
short = 800
long = 1200 if is_train else 2000
max_num_gt = 100
class ScaleRange:
valid_ranges = Trident.valid_ranges
cal_on_origin = Trident.valid_ranges_on_origin # True: valid_ranges on origin image scale / valid_ranges on resized image scale
class AnchorTarget2DParam:
class generate:
short = 800 // 16
long = 1200 // 16
stride = 16
scales = (2, 4, 8, 16, 32)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class trident:
invalid_anchor_threshd = 0.3
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.tridentnet.input import ScaleAwareRange, TridentAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
ScaleAwareRange(ScaleRange),
TridentAnchorTarget2D(AnchorTarget2DParam),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "gt_bbox"]
if Trident.train_scaleaware:
data_name.append("valid_ranges")
label_name = ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output"],
["rpn_cls_label"]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output"],
["rpn_cls_label"]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
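# --- Usage sketch (added for illustration) ---------------------------------
# ``get_config`` packs everything a training/testing driver needs into plain
# parameter classes. The unpacking below mirrors the return statement above;
# running it requires the full simpledet/MXNet environment this config
# belongs to, so treat it as an illustration of how the returned objects are
# accessed rather than a standalone script.
if __name__ == "__main__":
    (General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam,
     ModelParam, OptimizeParam, TestParam,
     transform, data_name, label_name, metric_list) = get_config(is_train=True)

    print("config name:", General.name)
    print("images per device:", General.batch_image)
    print("base lr:", OptimizeParam.optimizer.lr)
    print("train transforms:", [type(t).__name__ for t in transform])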
|
the-stack_106_13963
|
#!/usr/bin/env python
"""Tests and validates classes from :py:mod:`plastid.genomics.genome_array`,
these being |GenomeArray|, |SparseGenomeArray| and |BAMGenomeArray|,
using test data found in plastid.test.data.
This module additionally contains utilites to generate other test datasets.
To do, please see the documentation for :py:func:`create_dataset`. Note,
this requires as external dependencies, Bowtie, Tophat, and Samtools.
"""
import copy
import tempfile
import os
import subprocess
import functools
import re
import unittest
import warnings
import pysam
import numpy.random
from pkg_resources import resource_filename, cleanup_resources
from nose.plugins.attrib import attr
from Bio import SeqIO
import plastid.util.services.exceptions
from plastid.readers.bed import BED_Reader
from plastid.genomics.genome_array import (
GenomeArray,
SparseGenomeArray,
BigWigGenomeArray,
BAMGenomeArray,
ThreePrimeMapFactory,
FivePrimeMapFactory,
CenterMapFactory,
five_prime_map,
three_prime_map,
center_map,
)
from plastid.genomics.roitools import GenomicSegment, SegmentChain
from plastid.genomics.seqtools import random_seq
from plastid.util.io.openers import NullWriter
from plastid.util.services.decorators import skip_if_abstract
from plastid.util.services.mini2to3 import cStringIO
#===============================================================================
# INDEX: annotations/data used in unit tests and in generation of test datasets
#===============================================================================
# parameters to flesh out unit tests
# these are used by AbstractGenomeArrayHelper.set_class_parameters below
_GENOME_ARRAY_PARAMS = {
"test_class": GenomeArray,
"empty_regions": ["splice", "introns"],
"native_format": "bowtie",
}
_SPARSE_GENOME_ARRAY_PARAMS = {
"test_class": SparseGenomeArray,
"empty_regions": ["splice", "introns"],
"native_format": "bowtie",
}
_BIGWIG_GENOME_ARRAY_PARAMS = {
"test_class": BigWigGenomeArray,
"empty_regions": ["introns"],
"native_format": "bigwig",
}
_BAM_GENOME_ARRAY_PARAMS = {
"test_class": BAMGenomeArray,
"empty_regions": ["introns"],
"native_format": "BAM",
}
# descriptions of mapping configurations that we will use in test datasets
_SAMPLE_PAT = re.compile(r"(center|fiveprime|threeprime)_([0-9]+)")
_SAMPLE_BASES = [
'center_0',
'center_12',
'fiveprime_0',
'fiveprime_15',
'threeprime_0',
'threeprime_15',
]
_GA_MAP_FUNCTIONS = {
"fiveprime": five_prime_map,
"threeprime": three_prime_map,
"center": center_map,
}
_BAM_MAP_RULES = {
"fiveprime_0": FivePrimeMapFactory(),
"threeprime_0": ThreePrimeMapFactory(),
"center_0": CenterMapFactory(),
"fiveprime_15": FivePrimeMapFactory(15),
"threeprime_15": ThreePrimeMapFactory(15),
"center_12": CenterMapFactory(12),
}
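# Note: the numeric suffix in each sample name above encodes the offset (for
# fiveprime/threeprime mapping) or the nibble size (for center mapping) that
# is handed to the corresponding map factory; _SAMPLE_PAT splits each name
# back into its (mapping, number) parts when bowtie-based arrays are built in
# _read_bowtie_files_to_genome_arrays() below.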
# constants/names of files in test datasets, to use in test cases
# or to generate using methods below
#
# all filenames are relative to a base_folder that is passed to individual functions
# these files will be created by create_test_dataset(), below
_TEST_FILES = {
"variable_step_fw": os.path.join("wig", "variable_step_fw.wig"),
"variable_step_rc": os.path.join("wig", "variable_step_rc.wig"),
"bedgraph_fw": os.path.join("wig", "bedgraph_fw.wig"),
"bedgraph_rc": os.path.join("wig", "bedgraph_rc.wig"),
"juncs": os.path.join("ebwt", "chrA.juncs"),
"bowtie_index": os.path.join("ebwt", "chrA"),
"bed": os.path.join("bed", "chrA.bed"),
"reads": os.path.join("fasta", "chrA_reads.fa"),
"bowtie": os.path.join("align", "chrA_unspliced.bowtie"),
"bam": os.path.join("align", "chrA_tophat.bam"),
"genome": os.path.join("fasta", "chrA.fa"),
}
# annotation data
TEST_CHR_BED = """chrA 100 1100 unique_plus 0 + -1 -1 0,0,0 1 1000, 0,
chrA 100 1100 unique_minus 0 - -1 -1 0,0,0 1 1000, 0,
chrA 1200 2250 entire_repeat_region_plus 0 + -1 -1 0,0,0 1 1050, 0,
chrA 1200 2250 entire_repeat_region_minus 0 - -1 -1 0,0,0 1 1050, 0,
chrA 1200 1700 repeat_1_plus 0 + -1 -1 0,0,0 1 500, 0,
chrA 1200 1700 repeat_1_minus 0 - -1 -1 0,0,0 1 500, 0,
chrA 1750 2250 repeat_2_plus 0 + -1 -1 0,0,0 1 500, 0,
chrA 1750 2250 repeat_2_minus 0 - -1 -1 0,0,0 1 500, 0,
chrA 2350 2475 splice_plus 100 + -1 -1 0,0,0 2 25,25, 0,100,
chrA 2350 2475 splice_minus 100 - -1 -1 0,0,0 2 25,25, 0,100,
chrA 2375 2450 intron_plus 0 + -1 -1 0,0,0 1 75, 0,
chrA 2375 2450 intron_minus 0 - -1 -1 0,0,0 1 75, 0,""".replace(
" ", "\t"
)
TEST_CHR_JUNCS = """chrA 2374 2450 +
chrA 2374 2450 -""".replace(" ", "\t")
# miscellaneous constants
STRAND_KEYS = {"+": "fw", "-": "rc"}
DEFAULT_READS_PER_REGION = 1000
DEFAULT_READ_LENGTH = 30
#===============================================================================
# INDEX: Helper functions for unit tests and test dataset creation methods below
#===============================================================================
def tearDownModule():
"""Remove test dataset files after unit tests are complete"""
cleanup_resources()
def fetch_regions():
"""Parses test regions of interest for synthetic genomes
Returns
-------
list<SegmentChain>
"""
return list(BED_Reader(cStringIO.StringIO(TEST_CHR_BED), return_type=SegmentChain))
def _read_count_vectors(base_folder):
"""Read count vectors from a synthetic datasets
generated by :py:method:`create_test_dataset`
Parameters
----------
base_folder : str
        path to base folder passed to :py:func:`create_test_dataset`
Returns
-------
dict : dict of numpy.ndarrays of count data
"""
dtmp = {}
for k in _SAMPLE_BASES:
for strand_key in ("fw", "rc"):
dtmp["%s_%s" % (k, strand_key)] = numpy.loadtxt(
os.path.join(base_folder, "count_vectors", "%s_%s.txt" % (k, strand_key))
)
return dtmp
def _read_bowtie_files_to_genome_arrays(base_folder, test_class=GenomeArray):
"""Construct |GenomeArray| s from bowtie files
Parameters
----------
base_folder : str
path to base folder passed to create_test_dataset()
test_class : class
Subclass of |MutableGenomeArray| (e.g. |GenomeArray| or |SparseGenomeArray| to test)
Returns
-------
dict : dict of |GenomeArray| s of mapped read alignments from bowtie
"""
gnds = {}
for k in _SAMPLE_BASES:
mapping, offset = _SAMPLE_PAT.search(k).groups()
trans_key = "nibble" if mapping == "center" else "offset"
trans_args = {trans_key: int(offset)}
gnds[k] = test_class()
with open(os.path.join(base_folder, _TEST_FILES["bowtie"])) as fh:
gnds[k].add_from_bowtie(fh, _GA_MAP_FUNCTIONS[k.split("_")[0]], **trans_args)
return gnds
def _get_ivc_numpy_counts(ivc, count_vec):
"""Fetches appropriately-spliced counts at each position in an ROI from a numpy array
Parameters
----------
ivc : |SegmentChain|
SegmentChain describing region of interest
count_vec : numpy.ndarray
numpy.ndarray, in coordinates matching those of ivc
Returns
-------
    numpy.ndarray : numpy.ndarray of counts at each position in ivc
"""
counts = []
for iv in ivc:
counts.extend(count_vec[iv.start:iv.end])
if ivc.spanning_segment.strand == "-":
counts = counts[::-1]
return numpy.array(counts)
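# Illustration (not executed): for a minus-strand chain covering
# [100, 105) + [110, 115) and a chromosome-length count vector ``vec``,
# _get_ivc_numpy_counts() returns the equivalent of
#     numpy.concatenate([vec[100:105], vec[110:115]])[::-1]
# i.e. counts are gathered exon-by-exon in genomic order, then reversed so
# that index 0 corresponds to the 5' end of the minus-strand feature.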
#===============================================================================
# INDEX: unittest suites
#===============================================================================
class AbstractGenomeArrayHelper(unittest.TestCase):
"""Abstract base class for various types of |AbstractGenomeArray| test cases"""
set_up = False
@staticmethod
def set_class_parameters(
cls, params, test_folder=resource_filename("plastid", "test/data/mini"), tol=1e-8
):
"""Set class parameters on the creation of the first instance.
This is a bit of a hack because we need to set class parameters.
We can't do this in a ``setUpClass`` method, because ``setUpClass`` only
accepts a single parameter (the class). We don't want to do this in
``__init__`` either, because unittest calls ``__init__`` once per test run,
and these operations are expensive. So, instead we define this method,
and call it from ``__init__`` if and only if ``cls.set_up == False``
Parameters
----------
cls : class
class that is a subclass of :py:class:`unittest.TestCase`,
to which parameters will be appended
params : dict
Parameters specific to the set-up of test suites for specific
types of GenomeArrays
test_folder : str or :py:class:`Resource`
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
cls.test_folder = test_folder
cls.tol = tol
cls.test_class = params["test_class"]
cls.native_format = params["native_format"]
cls.count_vecs = _read_count_vectors(cls.test_folder)
cls.regions = fetch_regions()
cls.region_classes = {
"unique": [X for X in cls.regions if "unique" in X.get_name()],
"repeat":
[X for X in cls.regions if "entire" not in X.get_name() and "repeat" in X.get_name()],
"introns": [X for X in cls.regions if "intron" in X.get_name()],
"splice": [X for X in cls.regions if "splice" in X.get_name()],
"entire": [X for X in cls.regions if "entire" in X.get_name()],
}
cls.region_classes["empty"] = []
cls.empty_names = []
for k in params["empty_regions"]:
my_regions = cls.region_classes[k]
cls.region_classes["empty"].extend(my_regions)
cls.empty_names.extend([X.get_name() for X in my_regions])
cls.expected_unnorm_sum = 0
#for region in set(cls.regions) - set(cls.region_classes["empty"]) - set(cls.region_classes["entire"]):
read_regions = [
X for X in cls.regions
if all([X.get_name() not in cls.empty_names, "entire" not in X.get_name()])
]
for region in read_regions:
vec_key = "fw" if region.strand == "+" else "rc"
cls.expected_unnorm_sum += _get_ivc_numpy_counts(
region, cls.count_vecs["fiveprime_0_%s" % vec_key]
).sum()
cls.set_up = True
def __init__(
self,
methodName='runTest',
params={},
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
|AbstractGenomeArray| subclasses. Don't change these
test_folder : str or :py:class:`Resource`
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
unittest.TestCase.__init__(self, methodName=methodName)
# only do setup if __init__ is called by a subclass
if "Abstract" not in self.__class__.__name__:
if self.__class__.set_up == False:
AbstractGenomeArrayHelper.set_class_parameters(
self.__class__, params=params, test_folder=test_folder, tol=tol
)
@skip_if_abstract
def test_chroms(self):
for v in self.gnds.values():
self.assertEqual(set(v.chroms()), set(["chrA"]))
@skip_if_abstract
def test_strands(self):
possible_strands = set(["+", "-", "."])
for v in self.gnds.values():
self.assertGreaterEqual(len(set(v.strands()) & possible_strands), 0)
@skip_if_abstract
def test_test_class(self):
# Assure all genome arrays tested are of correct subclass
for k, v in self.gnds.items():
self.assertTrue(
isinstance(v, self.test_class),
"Test %s: instance is of wrong class (expected: %s, found %s)" %
(k, self.test_class.__name__, v.__class__.__name__)
)
@skip_if_abstract
def test_native_import_positionwise_equality_unique_regions(self):
for k in _SAMPLE_BASES:
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = numpy.array(region.get_counts(self.gnds[k]))
self.assertGreater(gnd_counts.sum(), 0, "Region is empty in sample %s" % k)
known_counts = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, strand_key)]
)
max_err = max(abs(gnd_counts - known_counts))
msg1 = "Positionwise count difference (%s) exceeded tolerance (%s) for '%s' file import for sample test '%s'" % (
                    max_err, self.tol, self.native_format, k
)
self.assertLessEqual(max_err, self.tol, msg1)
sum_diff = abs(known_counts.sum() - gnd_counts.sum())
msg2 = "Error in difference of total counts (%s) exceeded tolerance (%s) for '%s' import for sample test %s" % (
sum_diff, self.tol, self.native_format, k
)
self.assertLessEqual(sum_diff, self.tol, msg2)
@skip_if_abstract
def test_native_import_positionwise_equality_repeat_regions(self):
# test sums of position-wise vectors for repeat regions
plus_repeat = [X for X in self.region_classes["repeat"] if X.spanning_segment.strand == "+"]
minus_repeat = [
X for X in self.region_classes["repeat"] if X.spanning_segment.strand == "-"
]
lengths = set([X.length for X in plus_repeat + minus_repeat])
self.assertEqual(len(lengths), 1)
for k in _SAMPLE_BASES:
plus_vec = numpy.zeros(plus_repeat[0].length)
minus_vec = numpy.zeros(plus_repeat[0].length)
known_plus_vec = numpy.zeros(plus_repeat[0].length)
known_minus_vec = numpy.zeros(plus_repeat[0].length)
for region in plus_repeat:
plus_vec += region.get_counts(self.gnds[k])
known_plus_vec += _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, "fw")]
)
for region in minus_repeat:
minus_vec += region.get_counts(self.gnds[k])
known_minus_vec += _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, "rc")]
)
self.assertGreater(plus_vec.sum(), 0)
self.assertGreater(minus_vec.sum(), 0)
self.assertTrue(
(abs(known_plus_vec - plus_vec) <= self.tol).all(),
"Positionwise count difference exceeded tolerance %s for %s import on sample test %s on plus strand"
% (self.tol, self.native_format, k)
)
self.assertTrue(
(abs(known_minus_vec - minus_vec) <= self.tol).all(),
"Positionwise count difference exceeded tolerance %s for %s import for sample test %s on minus strand"
% (self.tol, self.native_format, k)
)
@skip_if_abstract
def test_native_import_empty_regions(self):
# test regions that should be empty (e.g. introns and splicing)
for k in _SAMPLE_BASES:
for region in self.region_classes["empty"]:
self.assertEqual(
sum(region.get_counts(self.gnds[k])), 0,
"Found counts in region that should be empty for sample test %s" % k
)
@skip_if_abstract
def variablestep_and_bedgraph_export_helper(
self, wiggle_type, export_function, input_class=None, **kwargs
):
"""Helper function to evaluate tests on variable step wiggle or BED export
Parameters
----------
wiggle_type : str
Type of wiggle file. "variable_step" or "bedgraph"
export_function : function
unbound method defining export type (e.g. GenomeArray.to_variable_step, BAMGenomeArray.to_bedgraph)
input_class : subclass of |MutableAbstractGenomeArray| or None
Class into which exported wiggle or bedgraph files will be read. If None, defaults to self.test_class
kwargs : keyword arguments
"""
if input_class is None:
input_class = self.test_class
for k, v in self.gnds.items():
fw_out = tempfile.NamedTemporaryFile(mode="w", delete=False)
rc_out = tempfile.NamedTemporaryFile(mode="w", delete=False)
export_function(v, fw_out, "test", "+", **kwargs)
export_function(v, rc_out, "test", "-", **kwargs)
fw_out.close()
rc_out.close()
new_gnd = input_class()
with open(fw_out.name) as fh:
new_gnd.add_from_wiggle(fh, "+")
with open(rc_out.name) as fh:
new_gnd.add_from_wiggle(fh, "-")
self.assertGreater(v.lengths()["chrA"], 0)
self.assertGreater(new_gnd.lengths()["chrA"], 0)
ivplus = GenomicSegment("chrA", 0, v.lengths()["chrA"], "+")
ivminus = GenomicSegment("chrA", 0, v.lengths()["chrA"], "-")
# test equality of what was exported with current state of GenomeArray
self.assertTrue(
                (abs(new_gnd[ivplus] - v[ivplus]) <= self.tol).all(),
"%s wiggle output on plus strand failed positionwise tolerance %s for test %s" %
(wiggle_type, self.tol, k)
)
self.assertGreater(
new_gnd[ivplus].sum(), 0,
"No counts found for %s reimport test %s" % (wiggle_type, k)
)
self.assertTrue(
                (abs(new_gnd[ivminus] - v[ivminus]) <= self.tol).all(),
"%s wiggle output on minus strand failed positionwise tolerance %s for test %s" %
(wiggle_type, self.tol, k)
)
self.assertGreater(
new_gnd[ivminus].sum(), 0,
"No counts found for %s reimport test %s" % (wiggle_type, k)
)
# ground-truth test against numpy arrays for unique regions
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = numpy.array(region.get_counts(new_gnd))
self.assertGreater(
gnd_counts.sum(), 0, "Reimported region is empty in sample %s" % k
)
known_counts = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, strand_key)]
)
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference (%s) exceeded tolerance (%s) for %s reimport after export from class '%s' for sample '%s'"
                    % (max_err, self.tol, self.native_format, self.test_class, k)
)
os.remove(fw_out.name)
os.remove(rc_out.name)
@skip_if_abstract
def test_unnormalized_sum(self):
for k, v in self.gnds.items():
v.set_normalize(False)
found = v.sum()
expected = self.expected_unnorm_sum
err = abs(found - expected)
err_msg = "Observed error (%s) in unnormalized sum (observed %s; expected %s) greater than tolerance (%s) for sample '%s'" % (
err, found, expected, self.tol, k
)
self.assertLessEqual(err, self.tol, err_msg)
@skip_if_abstract
def test_normalize_not_change_sum(self):
for k, v in self.gnds.items():
v.set_normalize(True)
found_sum = v.sum()
err_msg = "Normalize flag changed sum to %s from %s for sample '%s'" % (
found_sum, self.expected_unnorm_sum, k
)
err = abs(found_sum - self.expected_unnorm_sum)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(False)
@skip_if_abstract
def test_set_and_reset_sum(self):
expected_unnorm_sum2 = 50000
for k, v in self.gnds.items():
v.set_sum(expected_unnorm_sum2)
v.set_normalize(False)
found = v.sum()
err = abs(found - expected_unnorm_sum2)
err_msg = "Observed error (%s) in sample '%s' set unnormalized sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, expected_unnorm_sum2, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(True)
found = v.sum()
err = abs(found - expected_unnorm_sum2)
err_msg = "Observed error (%s) in sample '%s' set normalized sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, expected_unnorm_sum2, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
v.set_normalize(False)
v.reset_sum()
found = v.sum()
err = abs(found - self.expected_unnorm_sum)
err_msg = "Observed error (%s) in sample '%s' reset sum (observed %s; expected %s) greater than tolerance %s" % (
err, k, found, self.expected_unnorm_sum, self.tol
)
self.assertLessEqual(err, self.tol, err_msg)
@skip_if_abstract
def test_regionwise_normalize_and_sum(self):
expected_unnorm_sum2 = 50000
for k, v in self.gnds.items():
v.set_normalize(False)
v.reset_sum()
# add an order of magnitude to account for summing
tol = self.tol * 10
# exclude repeat regions, because those will align differently than they were generated
# remove "empty" also, because this will include spliced regions for some tests,
# as necessary
nonrepeat_nonempty = [
X for X in self.regions
if all(["repeat" not in X.get_name(),
X.get_name() not in self.empty_names])
]
for region in nonrepeat_nonempty: #set(self.regions) - set(self.region_classes["repeat"]) - set(self.region_classes["empty"]):
# Make sure baseline number is ok
found_region_sum = sum(region.get_counts(v))
expected_region_unnorm = _get_ivc_numpy_counts(
region, self.count_vecs["%s_%s" % (k, STRAND_KEYS[region.strand])]
).sum()
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Test normalize
v.set_normalize(True)
expected_region_norm = float(expected_region_unnorm) / v.sum() * 10**6
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm)
self.assertLessEqual(
err, tol,
"Found normalized region sum (%s) different from expected (%s) more than error (observed %s; tolerance %s) for sample '%s'"
% (found_region_sum, expected_region_norm, err, tol, k)
)
# Test reversibility
v.set_normalize(False)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found re-unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Set sum, no normalization
v.set_sum(expected_unnorm_sum2)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found post-global-sum-set unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
# Add normalization on top of set sum
v.set_normalize(True)
expected_region_norm2 = float(expected_region_unnorm) / expected_unnorm_sum2 * 10**6
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm2)
self.assertLessEqual(
err, tol,
"Found post-global-sum-set normalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_norm2, tol)
)
# Reset sum, keep normalization
v.reset_sum()
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_norm)
self.assertLessEqual(
err, tol,
"Found post-reset normalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_norm, tol)
)
# Revert all
v.set_normalize(False)
found_region_sum = sum(region.get_counts(v))
err = abs(found_region_sum - expected_region_unnorm)
self.assertLessEqual(
err, tol,
"Found unnormalized region sum %s different from expected %s more than error %s"
% (found_region_sum, expected_region_unnorm, tol)
)
@skip_if_abstract
def test_get_genomicsegment_roi_order_false(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
#gnd_counts = self.gnds[k].__getitem__(seg,roi_order=False)
gnd_counts = self.gnds[k].get(seg, roi_order=False)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==False for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
@skip_if_abstract
def test_get_genomicsegment_roi_order_true(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k].get(seg, roi_order=True)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
if seg.strand == "-":
known_counts = known_counts[::-1]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==True for sample test %s"
% (self.tol, max_err, self.native_format, k)
)
@skip_if_abstract
def test_getitem_genomicsegment(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
seg = region.spanning_segment
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k].__getitem__(seg)
known_counts = self.count_vecs["%s_%s" % (k, strand_key)][seg.start:seg.end]
if seg.strand == "-":
known_counts = known_counts[::-1]
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s __getitem__ with roi_order==True for sample test %s"
                % (max_err, self.tol, self.native_format, k)
)
@skip_if_abstract
def test_getitem_segmentchain(self):
k = _SAMPLE_BASES[0]
for region in self.region_classes["unique"]:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
gnd_counts = self.gnds[k][region] # test
self.assertGreater(gnd_counts.sum(), 0, "Region is empty in sample %s" % k)
known_counts = _get_ivc_numpy_counts(region, self.count_vecs["%s_%s" % (k, strand_key)])
max_err = max(abs(gnd_counts - known_counts))
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference '%s' exceeded tolerance '%s' for %s import for sample test %s"
                % (max_err, self.tol, self.native_format, k)
)
class AbstractExportableGenomeArrayHelper(AbstractGenomeArrayHelper):
@skip_if_abstract
def test_variablestep_export(self):
self.variablestep_and_bedgraph_export_helper(
"variable_step", self.test_class.to_variable_step
)
@skip_if_abstract
def test_bedgraph_export(self):
self.variablestep_and_bedgraph_export_helper("bedgraph", self.test_class.to_bedgraph)
@attr(test="unit")
@attr(speed="slow")
class TestGenomeArray(AbstractExportableGenomeArrayHelper):
"""Test case for :py:class:`GenomeArray`"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
|AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
if self.__class__.has_gnds == False:
self.__class__.gnds = _read_bowtie_files_to_genome_arrays(
self.test_folder, self.test_class
)
self.__class__.has_gnds = True
#TestGenomeArray.setUpClassOnlyOnce()
def test_setitem_genomicsegment_scalar(self):
ga = GenomeArray({"chrA": 2000})
segplus = GenomicSegment("chrA", 50, 100, "+")
segminus = GenomicSegment("chrA", 50, 100, "-")
# scalar set
ga[segplus] = 52
ga[segminus] = 342
self.assertTrue(
(ga._chroms["chrA"]["+"][50:100] == 52).all(),
"%s failed scalar genomicsegment __setitem__ for plus strand."
)
self.assertTrue(
(ga._chroms["chrA"]["-"][50:100] == 342).all(),
"%s failed scalar genomicsegment __setitem__ for minus strand."
)
self.assertEqual(ga.sum(), 52 * len(segplus) + 342 * len(segminus))
def test_setitem_genomicsegment_vector(self):
ga = GenomeArray({"chrA": 2000})
segplus = GenomicSegment("chrA", 50, 100, "+")
segminus = GenomicSegment("chrA", 50, 100, "-")
# vector set
r1 = numpy.random.randint(0, high=242, size=50)
r2 = numpy.random.randint(0, high=242, size=50)
ga[segplus] = r1
ga[segminus] = r2
self.assertTrue(
(ga._chroms["chrA"]["+"][50:100] == r1).all(),
"%s failed vector genomicsegment __setitem__ for plus strand."
)
self.assertTrue(
(ga._chroms["chrA"]["-"][50:100] == r2[::-1]).all(),
"%s failed vector genomicsegment __setitem__ for minus strand."
)
self.assertEqual(ga.sum(), r1.sum() + r2.sum())
def test_setitem_segmentchain_scalar(self):
ga = GenomeArray({"chrA": 2000})
pluschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "+"),
GenomicSegment("chrA", 150, 732, "+"),
GenomicSegment("chrA", 1800, 2500, "+"),
)
minuschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "-"),
GenomicSegment("chrA", 150, 732, "-"),
GenomicSegment("chrA", 1800, 2500, "-"),
)
ga[pluschain] = 31
ga[minuschain] = 424
for seg in pluschain:
self.assertTrue((ga._chroms[seg.chrom][seg.strand][seg.start:seg.end] == 31).all())
for seg in minuschain:
self.assertTrue((ga._chroms[seg.chrom][seg.strand][seg.start:seg.end] == 424).all())
self.assertEqual(ga.sum(), 31 * pluschain.length + 424 * minuschain.length)
def test_setitem_segmentchain_vector(self):
ga = GenomeArray({"chrA": 2000})
pluschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "+"),
GenomicSegment("chrA", 150, 732, "+"),
GenomicSegment("chrA", 1800, 2500, "+"),
)
minuschain = SegmentChain(
GenomicSegment("chrA", 50, 100, "-"),
GenomicSegment("chrA", 150, 732, "-"),
GenomicSegment("chrA", 1800, 2500, "-"),
)
plusvec = numpy.random.randint(0, high=250, size=pluschain.length)
minusvec = numpy.random.randint(0, high=250, size=minuschain.length)
ga[pluschain] = plusvec
ga[minuschain] = minusvec
x = 0
for seg in pluschain:
subvec = ga._chroms["chrA"]["+"][seg.start:seg.end]
self.assertTrue((subvec == plusvec[x:x + len(subvec)]).all())
x += len(subvec)
x = 0
for seg in minuschain:
subvec = ga._chroms["chrA"]["-"][seg.start:seg.end][::-1]
self.assertTrue(
(subvec == minusvec[len(minusvec) - x - len(subvec):len(minusvec) - x]).all()
)
x += len(subvec)
self.assertEqual(ga.sum(), plusvec.sum() + minusvec.sum())
def variablestep_and_bed_import_helper(self, wiggle_type):
"""Helper function to evaluate tests on variable step wiggle or BEDgraph import
Parameters
----------
wiggle_type : str
Type of wiggle file. "variable_step" or "bedgraph"
"""
gnd = self.test_class()
with open(os.path.join(self.test_folder, _TEST_FILES["%s_%s" % (wiggle_type, "fw")])) as fh:
gnd.add_from_wiggle(fh, "+")
with open(os.path.join(self.test_folder, _TEST_FILES["%s_%s" % (wiggle_type, "rc")])) as fh:
gnd.add_from_wiggle(fh, "-")
# Make sure imported counts are nonzero
self.assertGreater(gnd.sum(), 0, "Import of %s yielded no counts!" % wiggle_type)
chrA_len = gnd.lengths()["chrA"]
for strand, trackstub, label in [("+", "fw", "plus"), ("-", "rc", "minus")]:
my_vec = self.count_vecs["fiveprime_0_%s" % trackstub]
vec_len = len(my_vec)
empty_iv = GenomicSegment("chrA", vec_len, chrA_len, strand)
nonempty_iv = GenomicSegment("chrA", 0, vec_len, strand)
nonempty_vec = gnd.get(nonempty_iv, roi_order=False)
# make sure count vector has requisite counts
self.assertGreater(my_vec.sum(), 0)
# make sure sums are equal
self.assertEquals(my_vec.sum(), nonempty_vec.sum())
# make sure all regions after count vector are empty
self.assertEquals(
gnd[empty_iv].sum(), 0, "Found counts in region that should be empty."
)
# make sure all positions in regions up to count vector are correct
max_err = abs(my_vec - nonempty_vec).max()
self.assertLessEqual(
max_err, self.tol,
"Positionwise count difference %s exceeded tolerance %s for wiggle import on %s strand"
% (max_err, self.tol, label)
)
def test_bedgraph_import(self):
self.variablestep_and_bed_import_helper("bedgraph")
def test_variablestep_import(self):
self.variablestep_and_bed_import_helper("variable_step")
def test_genome_wide_scalar_plus_equals_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd += 5
self.assertEqual(gnd.sum(), 5 * expected_length)
gnd -= 5
self.assertEqual(gnd.sum(), 0)
def test_genome_wide_scalar_plus_equals_not_change_length(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd += 5
self.assertEqual(len(gnd), expected_length)
def test_genome_wide_scalar_plus_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
# add scalar
gnd2 = gnd + 5
self.assertEqual(gnd2.sum(), 5 * expected_length)
self.assertEqual(gnd.sum(), 0)
# add scalar to occupied gnd
gnd3 = gnd2 + 1
self.assertEqual(gnd3.sum(), 6 * expected_length)
def test_genome_scalar_times_equals_sum(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd *= 5
self.assertEqual(gnd.sum(), 0)
gnd += 1
gnd *= 5
self.assertEqual(gnd.sum(), 5 * expected_length)
def test_genome_wide_scalar_times_equals_not_change_length(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
self.assertEqual(len(gnd), expected_length)
gnd *= 5
self.assertEqual(len(gnd), expected_length)
def test_genome_wide_scalar_times(self):
chroms = {"chrA": 1000, "chrB": 10000}
expected_length = 2 * sum(chroms.values())
gnd = self.test_class(chroms)
gnd += 1
self.assertEqual(gnd.sum(), expected_length)
gnd2 = gnd * 2
self.assertEqual(gnd2.sum(), 2 * expected_length)
self.assertEqual(gnd.sum(), expected_length)
def test_genome_wide_array_add_same_size(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
gnd1 += 1
gnd2 += 3
gnd3 = gnd1 + gnd2
self.assertEquals(gnd3.sum(), gnd2.sum() + gnd1.sum())
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
        iv1minus = GenomicSegment("chrA", 0, 1000, "-")
self.assertTrue((gnd3[iv1plus] == gnd2[iv1plus] + gnd1[iv1plus]).all())
self.assertTrue((gnd3[iv1minus] == gnd2[iv1minus] + gnd1[iv1minus]).all())
def test_genome_wide_array_multiply_same_size(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
gnd1 += 2
gnd2 += 3
gnd3 = gnd1 * gnd2
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
        iv1minus = GenomicSegment("chrA", 0, 1000, "-")
self.assertTrue((gnd3[iv1plus] == gnd2[iv1plus] * gnd1[iv1plus]).all())
self.assertTrue((gnd3[iv1minus] == gnd2[iv1minus] * gnd1[iv1minus]).all())
def test_iadd_no_normalize(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
gnd.set_normalize(False)
self.assertEqual(0, gnd.sum())
iv1plus = GenomicSegment("chrA", 0, 1000, "+")
iv2plus = GenomicSegment("chrA", 0, 500, "+")
gnd[iv1plus] += 1
self.assertEqual(1000, gnd.sum())
gnd[iv1plus] += 1
self.assertEqual(2000, gnd.sum())
gnd[iv2plus] += 1
self.assertEqual(2500, gnd.sum())
def test_iadd_with_normalize_raises_warning(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
gnd.set_normalize(True)
def my_func(ga):
ga.set_normalize(True)
ga[GenomicSegment("chrA", 0, 1000, "+")] += 5
# manually reset registry before test
plastid.util.services.exceptions.pl_once_registry = {}
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
my_func(gnd)
got_warning = False
for w in warns:
if "turning off normalization" in str(w.message):
got_warning = True
self.assertTrue(got_warning)
def test_eq(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd1 = self.test_class(chroms)
gnd2 = self.test_class(chroms)
self.assertEqual(gnd1, gnd2)
# diff chrom nonzero positions
gnd2[GenomicSegment("chrC", 500, 1000, "-")] = 200
self.assertNotEqual(gnd1, gnd2)
# same chroms, nonzero positions, and values
gnd1[GenomicSegment("chrC", 500, 1000, "-")] = 200
self.assertEqual(gnd1, gnd2)
# same chroms and nonzero positions, diff values
gnd1[GenomicSegment("chrC", 500, 1000, "-")] += 200
self.assertNotEqual(gnd1, gnd2)
def test_setters_and_getters(self):
chroms = {"chrA": 1000, "chrB": 10000}
gnd = self.test_class(chroms)
#generate random read data set
for chr_name, chr_len in chroms.items():
for strand in ("+", "-"):
num_iters = numpy.random.randint(50, 100) #000)
starts = numpy.random.randint(0, chr_len - 50, size=num_iters)
ends = [X + numpy.random.randint(0, 50) for X in starts]
vals = numpy.random.randint(0, 100, size=num_iters)
for i in range(num_iters):
iv = GenomicSegment(chr_name, int(starts[i]), int(ends[i]), strand)
gnd[iv] = vals[i]
self.assertEquals(
len(gnd), 2 * sum(chroms.values()),
"Chromosome lengths incorrect: %s vs %s" % (len(gnd), 2 * sum(chroms.values()))
)
# auto-grow
iv1 = GenomicSegment("chrA", 10000, 11000, "+")
iv2 = GenomicSegment("chrA", 10500, 11000, "+")
iv3 = GenomicSegment("chrA", int(5 * 1e5) + 10500, int(5 * 1e5) + 11000, "+")
iv4 = GenomicSegment("chrB", int(5 * 1e5) + 10500, int(5 * 1e5) + 11000, "+")
gnd[iv1] = 1
self.assertEquals(sum(gnd[iv1]), 1000, "Auto-grow failed during set")
gnd[iv2] += 1
self.assertEquals(sum(gnd[iv1]), 1500, "+= failed during set")
self.assertEquals(sum(gnd[iv2]), 1000)
self.assertGreater(gnd.lengths()["chrA"], 1000, "Auto-grow failed chrA")
gnd[iv3] += 1
self.assertEqual(sum(gnd[iv3]), 500, "+= failed during set")
        self.assertEqual(sum(gnd[iv4]), 0, "Found counts where none were expected")
# setters & getters
iv1 = GenomicSegment("chrA", 200, 500, "+")
oldvals = copy.deepcopy(gnd[iv1])
gnd[iv1] = 5
newvals = copy.deepcopy(gnd[iv1])
self.assertTrue((oldvals != newvals).any(), "Set failed")
self.assertEqual(newvals.sum(), 5 * len(newvals))
self.assertEqual(newvals.sum(), 5 * len(iv1))
newvals2 = copy.deepcopy(gnd[iv1])
self.assertTrue((newvals2 == newvals).all(), "Set failed")
newvals = newvals2 = None
gnd[iv1] = oldvals
new_old = gnd[iv1]
        self.assertTrue((new_old == oldvals).all(), "Set failed")
# scalar add
gnd_plus5 = gnd + 5
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
(gnd_plus5[iv] == (gnd[iv] + 5)).all(),
"Scalar addition globally failed interval test"
)
# FIXME- what is len of a sparse array?
diff = abs(gnd_plus5.sum() - (gnd.sum() + 5 * len(gnd)))
self.assertLess(
diff, self.tol,
"Error in genome-wide scalar addition (%s) exceeded tolerance %s. Raw vals: %s vs %s " %
(diff, self.tol, gnd_plus5.sum(), gnd.sum() + 5 * len(gnd))
)
# scalar multiply
gnd3 = gnd * 3
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
(gnd3[iv] == (gnd[iv] * 3)).all(),
"Scalar multip.lication globally failed interal test"
)
        quotient = abs(1.0 - gnd3.sum() / (3 * gnd.sum()))
        self.assertLess(
            quotient, self.tol,
            "Error in scalar multiplication (%s) exceeded tolerance %s" % (quotient, self.tol)
)
# genome-wide multiply
gnd4 = gnd + gnd3
gndmul = gnd3 * gnd4
for iv in (iv1, iv2, iv3, iv4):
self.assertTrue(
                (abs(gndmul[iv] - gnd3[iv] * gnd4[iv]) <= self.tol).all(),
                "Error in genome-wide multiply exceeded tolerance %s" % self.tol
)
# genome-wide add
is_ok = True
for iv in (iv1, iv2, iv3):
is_ok &= sum(gnd4[iv]) > 0
self.assertTrue(
(gnd4[iv] == gnd[iv] + gnd3[iv]).all(),
"Error in genome-wide addition exceeded tolerance %s" % self.tol
)
self.assertGreater(sum(gnd4[iv]), 0)
def test_nonzero(self):
excluded_set = set([])
for region in self.region_classes["repeat"] + self.region_classes["splice"]:
excluded_set |= region.get_position_set()
for k, v in self.gnds.items():
nz_dict = v.nonzero()
for strand in v.strands():
strand_key = STRAND_KEYS[strand]
expected = self.count_vecs["%s_%s" % (k, strand_key)].nonzero()[0]
found = nz_dict["chrA"][strand]
self.assertEqual(set(expected) - excluded_set, set(found) - excluded_set)
@attr(test="unit")
@attr(speed="slow")
class TestSparseGenomeArray(TestGenomeArray):
"""Test suite for |SparseGenomeArray|"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_SPARSE_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
            |AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
TestGenomeArray.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
@attr(test="unit")
@attr(speed="slow")
class TestBigWigGenomeArray(AbstractGenomeArrayHelper):
"""Test suite for |SparseGenomeArray|"""
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_BIGWIG_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-3
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
            |AbstractGenomeArray| subclasses. Don't change these
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
if self.__class__.has_gnds == False:
TestBigWigGenomeArray.gnds = TestBigWigGenomeArray.read_bigwig_files()
self.__class__.has_gnds = True
@staticmethod
def read_bigwig_files():
"""Read bigwig files into a dictionary
"""
dtmp = {}
for k in _SAMPLE_BASES:
dtmp[k] = ga = BigWigGenomeArray(fill=0)
fw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_fw.bw" % k)
rc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_rc.bw" % k)
ga.add_from_bigwig(fw, "+")
ga.add_from_bigwig(rc, "-")
return dtmp
def test_multiple_same_strand_sum(self):
# should see sum double
bigwigfile = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.bw")
bw = BigWigGenomeArray(fill=0)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 4000), self.tol)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 8000), self.tol)
bw.add_from_bigwig(bigwigfile, "+")
self.assertLessEqual(abs(bw.sum() - 12000), self.tol)
def test_multiple_same_strand_fetch(self):
bigwigfw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.bw")
bigwigrc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_rc.bw")
wigfw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_fw.wig")
wigrc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_center_12_rc.wig")
bw = BigWigGenomeArray(fill=0)
bw.add_from_bigwig(bigwigfw, "+")
bw.add_from_bigwig(bigwigfw, "+")
bw.add_from_bigwig(bigwigrc, "-")
bw.add_from_bigwig(bigwigrc, "-")
ga = GenomeArray(bw.lengths())
with open(wigfw) as fh:
ga.add_from_wiggle(fh, "+")
with open(wigrc) as fh:
ga.add_from_wiggle(fh, "-")
for chrom, length in bw.lengths().items():
for strand in bw.strands():
seg = GenomicSegment(chrom, 0, length, strand)
maxdiff = abs(bw[seg] - 2 * ga[seg]).max()
msg = "Maximum difference for multiple_strand_fetch (%s) exceeds tolerance (%s)" % (
maxdiff, self.tol
)
self.assertLessEqual(maxdiff, self.tol, msg)
def test_to_genome_array(self):
for test, orig in self.gnds.items():
fw = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_fw.wig" % test)
rc = os.path.join(TestBigWigGenomeArray.test_folder, "wig", "bw_%s_rc.wig" % test)
expected = GenomeArray()
with open(fw) as fh:
expected.add_from_wiggle(fh, "+")
with open(rc) as fh:
expected.add_from_wiggle(fh, "-")
found = orig.to_genome_array()
for chrom, length in expected.lengths().items():
for strand in ("+", "-"):
seg = GenomicSegment(chrom, 0, length, strand)
diffvec = abs(orig[seg] - found[seg])
diffmax = diffvec.max()
msg1 = "Maximum difference between exported GenomeArray and BigWigGenomeArray (%s) exceeds tolerance (%s) for test '%s' strand '%s'" % (
diffmax, self.tol, test, strand
)
self.assertLessEqual(diffmax, self.tol, msg1)
for chrom, length in expected.lengths().items():
for strand in ("+", "-"):
seg = GenomicSegment(chrom, 0, length, strand)
diffvec = abs(expected[seg] - found[seg])
diffmax = diffvec.max()
msg1 = "Maximum difference between exported GenomeArray and wiggle-imported array (%s) exceeds tolerance (%s) for test '%s' strand '%s'" % (
diffmax, self.tol, test, strand
)
self.assertLessEqual(diffmax, self.tol, msg1)
class FakeDict(object):
"""Creates a dictionary-like object that provies dictionary-like access
to a BAMGenomeArray under various mapping rules, as if it were a collection
of separate GenomeArrays. This is only a convenience class to allow us to
re-use functions in the |AbstractGenomeArrayHelper| test suite in
|TestBAMGenomeArray|
"""
def __init__(self, bga, map_functions=_BAM_MAP_RULES):
"""Create a FakeDict
Parameters
----------
bga : |BAMGenomeArray|
map_functions : dict
Dictionary mapping descriptive names to mapping functions,
such as those made by :py:func:`plastid.genomics.genome_array.FivePrimeMapFactory`
"""
self.bga = bga
self.map_functions = map_functions
def __getitem__(self, key):
self.bga.set_mapping(self.map_functions[key])
return self.bga
def items(self):
for k in self.map_functions:
yield (k, self[k])
def values(self):
# must use key, to trigger map setting in __getitem__
for k in self.map_functions:
yield self[k]
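# --- Usage sketch (added for illustration) ---------------------------------
# FakeDict lets one BAMGenomeArray stand in for several mapping-specific
# arrays: indexing with a key from _BAM_MAP_RULES switches the mapping rule
# on the underlying array and hands back the same object.  ``some.bam`` is a
# placeholder path, not a file shipped with the test data.
#
#     bga = BAMGenomeArray("some.bam")
#     gnds = FakeDict(bga)
#     fiveprime = gnds["fiveprime_15"]   # same object, now 5'-mapped, offset 15
#     assert fiveprime is bga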
@attr(test="unit")
@attr(speed="slow")
class TestBAMGenomeArray(AbstractExportableGenomeArrayHelper):
set_up = False
has_gnds = False
def __init__(
self,
methodName='runTest',
params=_BAM_GENOME_ARRAY_PARAMS,
test_folder=resource_filename("plastid", "test/data/mini"),
tol=1e-8
):
"""Initialize test case to run a single method.
We override this method to make sure expensive operations are only run when
the first instance is made, and then stored in class attributes
Parameters
----------
methodName : str
            Name of method being run. Required by :py:class:`unittest.TestCase`
params : dict
Parameters specific to the set-up of test suites for specific
AbstractGenomeArray subclasses. Don't change these.
test_folder : str or Resource
Real or virtual location of folder of test data
tol : float
Tolerance for numerical differences between expected and observed
values in the various tests
"""
AbstractGenomeArrayHelper.__init__(
self, methodName=methodName, params=params, test_folder=test_folder, tol=tol
)
if not self.__class__.has_gnds:
bga = BAMGenomeArray(
[pysam.Samfile(os.path.join(self.test_folder, _TEST_FILES["bam"]), "rb")]
)
TestBAMGenomeArray.gnds = FakeDict(bga)
TestBAMGenomeArray.bga = bga
self.__class__.has_gnds = True
def test_open_str_filename(self):
z = BAMGenomeArray(os.path.join(self.test_folder, _TEST_FILES["bam"]))
self.assertEqual(z.sum(), self.bga.sum())
def test_open_multi_list(self):
v = [os.path.join(self.test_folder, _TEST_FILES["bam"])] * 2
z = BAMGenomeArray(v)
self.assertEqual(z.sum(), 2 * self.bga.sum())
def test_open_multi_filename(self):
f = os.path.join(self.test_folder, _TEST_FILES["bam"])
z = BAMGenomeArray(f, f)
self.assertEqual(z.sum(), 2 * self.bga.sum())
def mutable_conversion_helper(self, new_class):
"""Helper function to test conversion of |BAMGenomeArray| to various |MutableAbstractGenomeArray| types
Parameters
----------
new_class : class
Non-abstract subclass of |MutableAbstractGenomeArray|
"""
ivplus = GenomicSegment("chrA", 0, self.bga.lengths()["chrA"], "+")
ivminus = GenomicSegment("chrA", 0, self.bga.lengths()["chrA"], "-")
for k, v in self.gnds.items():
new_gnd = v.to_genome_array(array_type=new_class)
for iv in (ivplus, ivminus):
self.assertGreater(v[iv].sum(), 0)
self.assertGreater(new_gnd[iv].sum(), 0)
max_err = max(abs(v[iv] - new_gnd[iv]))
err_message = "%s BAMGenomeArray conversion to %s error %s exceeded tolerance %s." % (
k, new_class.__name__, max_err, self.tol
)
self.assertLess(max_err, self.tol, err_message)
def test_to_genome_array(self):
self.mutable_conversion_helper(GenomeArray)
def test_to_sparse_genome_array(self):
self.mutable_conversion_helper(SparseGenomeArray)
def variablestep_and_bedgraph_export_helper(self, wiggle_type, export_function):
# override function so we can test window size parameters in export
for window_size in (1, 2, 5, 10, 25, 100, 500, 1000, 10000):
AbstractGenomeArrayHelper.variablestep_and_bedgraph_export_helper(
self,
wiggle_type,
export_function,
input_class=GenomeArray,
window_size=window_size
)
def test_add_remove_filter(self):
# add a plus-strand filter and require minus strand regions be zero
# then remove and watch it come back
bga = self.bga
def minus_only_filter(read):
return read.is_reverse
entire_iv_plus = GenomicSegment("chrA", 0, bga.lengths()["chrA"], "+")
entire_iv_minus = GenomicSegment("chrA", 0, bga.lengths()["chrA"], "-")
# fetch counts & check
pre_plus = bga[entire_iv_plus]
self.assertGreater(pre_plus.sum(), 0)
pre_minus = bga[entire_iv_minus]
self.assertGreater(pre_minus.sum(), 0)
# add filter, re-fetch
bga.add_filter("minus_only", minus_only_filter)
post_plus = bga[entire_iv_plus]
post_minus = bga[entire_iv_minus]
self.assertEqual(post_plus.sum(), 0)
self.assertFalse((post_plus == pre_plus).all())
self.assertEqual(post_minus.sum(), pre_minus.sum())
self.assertTrue((post_minus == pre_minus).all())
# remove_filter, re_fetch
bga.remove_filter("minus_only")
post_post_plus = bga[entire_iv_plus]
post_post_minus = bga[entire_iv_minus]
self.assertEqual(post_post_plus.sum(), pre_plus.sum())
self.assertTrue((post_post_plus == pre_plus).all())
self.assertEqual(post_post_minus.sum(), pre_minus.sum())
self.assertTrue((post_post_minus == pre_minus).all())
#===============================================================================
# INDEX: tools for generating test datasets with known results
#===============================================================================
def _detect_or_create_folders(base_folder):
"""Creates and tests folder hierarchy needed for unit/integrative tests below.
Parameters
----------
base_folder : str
path to base folder in which test data will be created
"""
sub_folders = ["fasta", "ebwt", "count_vectors", "align", "wig", "bed"]
if not os.path.isdir(base_folder):
os.mkdir(base_folder)
for name in sub_folders:
sf = os.path.join(base_folder, name)
if not os.path.isdir(sf):
os.mkdir(sf)
# BED file for use later
with open(os.path.join(base_folder, _TEST_FILES["bed"]), "w") as fout:
fout.write(TEST_CHR_BED)
fout.close()
# .juncs file for tophat
with open(os.path.join(base_folder, _TEST_FILES["juncs"]), "w") as fout:
fout.write(TEST_CHR_JUNCS)
fout.close()
def detect_base_folder(func):
"""Decorator function to ensure that folders required by functions below exist.
For this decorator to work, the function it wraps MUST require base_folder
as its first parameter.
Parameters
----------
func : Function
Function to decorate
Returns
-------
Function : wrapped function
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
_detect_or_create_folders(args[0])
return func(*args, **kwargs)
return new_func
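# Hedged usage sketch (write_report is a hypothetical helper, not part of this module):
# any function whose first positional argument is the base folder can be wrapped so that
# the folder hierarchy is created before it runs, e.g.
#
# @detect_base_folder
# def write_report(base_folder, text):
#     with open(os.path.join(base_folder, "report.txt"), "w") as fout:
#         fout.write(text)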
@detect_base_folder
def create_synthetic_genome(base_folder):
"""Create a synthetic genome with unique and multimapping regions annotated
as in TEST_CHR_BED
Parameters
----------
base_folder : str
path to base folder in which test data will be created
Genome will be saved as base_folder/fasta/chrA.fa
Returns
-------
str : synthetic genome sequence
"""
ustart = 100
ulength = 1000
spacer_length = 100
#uend = ustart + ulength
#dstart = uend + spacer_length
dlength = 500
dspacer = 50
#dstart = uend + spacer_length
#dend = dstart + dlength + dspacer + dlength
#splice_start = dend + spacer_length
splice_flank_length = 25
splice_spacer = 75
# generate sequence
unique_region = random_seq(ulength)
duplicated_repeat = random_seq(dlength)
duplicated_region = duplicated_repeat + ("N" * dspacer) + duplicated_repeat
splice_region = (
random_seq(splice_flank_length) + ("N" * splice_spacer) + random_seq(splice_flank_length)
)
# write to FASTA
total_genome = (
("N" * ustart) + unique_region + ("N" * spacer_length) + duplicated_region
+ ("N" * spacer_length) + splice_region + ("N" * 100)
)
with open(os.path.join(base_folder, _TEST_FILES["genome"]), "w") as fh:
fh.write(">%s\n%s\n" % ("chrA", total_genome))
fh.close()
return total_genome
@detect_base_folder
def create_bowtie_index(
base_folder, bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin")
):
"""Build bowtie indices to enable alignments in bowtie and tophat
against synthetic genome. Creates bowtie indices in base_folder/ebwt.
Requires a FASTA file of the synthetic genome in base_folder/fasta/chrA.fa,
so run create_synthetic_genome() first
Parameters
----------
base_folder : str
path to base folder in which test data will be created
bowtie_location : str
path to folder containing bowtie-build
Returns
-------
int : exit status of bowtie-build
"""
unspliced_args = [
os.path.join(bowtie_location, "bowtie-build"),
os.path.join(base_folder, _TEST_FILES["genome"]),
os.path.join(base_folder, _TEST_FILES["bowtie_index"])
]
unspliced_exit = subprocess.call(unspliced_args)
return unspliced_exit #| spliced_exit
def _ndarray_to_variable_step(count_vec, fh, name):
"""Write a numpy.ndarray to a variableStep wiggle file
Parameters
----------
count_vec : numpy.ndarray
vector of counts
fh : file-like
open filehandle
name : str
Track name
"""
fh.write("track type=wiggle_0 name=%s\n" % name)
fh.write("variableStep chrom=chrA span=1\n")
for i in count_vec.nonzero()[0]:
val = count_vec[i]
fh.write("%s\t%s\n" % (i + 1, val))
def _ndarray_to_bedgraph(count_vec, fh, name):
"""Write a numpy.ndarray to a BEDGraph file
Parameters
----------
count_vec : numpy.ndarray
vector of counts
fh : file-like
open filehandle
name : str
Track name
"""
fh.write("track type=bedGraph name=%s\n" % name)
last_val = count_vec[0]
start_i = 0
for i, val in enumerate(count_vec):
if val != last_val:
fh.write("%s\t%s\t%s\t%s\n" % ("chrA", start_i, i, last_val))
start_i = i
last_val = val
fh.write("%s\t%s\t%s\t%s\n" % ("chrA", start_i, i + 1, last_val))
@detect_base_folder
def generate_reads(
base_folder, reads_per_region=DEFAULT_READS_PER_REGION, read_length=DEFAULT_READ_LENGTH
):
"""Generates 30-nucleotide reads from a genome created by create_synthetic_genome,
choosing from uniquely-mapping and multimapping regions annotated in
TEST_CHR_BED. 10000 reads are generated for each region type. Reads are
returned in FASTA format. Also saves a numpy array of how many reads are expected
to align to each nucleotide position in the synthetic genome, if reads are mapped
at their 5' ends.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
Reads will be saved as base_folder/fasta/chrA_reads.fa.
Count vectors will be saved as various text files in
base_folder/count_vectors
reads_per_region : int
Number of reads to generate in each region
read_length : int
Length of reads to generate
Returns
-------
dict : dictionary of numpy.ndarrays corresponding to expected number of counts
at each genomic position, under various read alignment mapping rules
"""
with open(os.path.join(base_folder, "fasta", "chrA.fa")) as fh:
genome = SeqIO.to_dict(SeqIO.parse(fh, "fasta"))
len_A = len(genome["chrA"])
# TODO : align
count_vectors = {
"fiveprime_0_fw": numpy.zeros(len_A).astype(int),
"fiveprime_15_fw": numpy.zeros(len_A).astype(int),
"threeprime_0_fw": numpy.zeros(len_A).astype(int),
"threeprime_15_fw": numpy.zeros(len_A).astype(int),
"center_0_fw": numpy.zeros(len_A),
"center_12_fw": numpy.zeros(len_A),
"fiveprime_0_rc": numpy.zeros(len_A).astype(int),
"fiveprime_15_rc": numpy.zeros(len_A).astype(int),
"threeprime_0_rc": numpy.zeros(len_A).astype(int),
"threeprime_15_rc": numpy.zeros(len_A).astype(int),
"center_0_rc": numpy.zeros(len_A),
"center_12_rc": numpy.zeros(len_A),
} # yapf: disable
with open(os.path.join(base_folder, _TEST_FILES["reads"]), "w") as read_fh:
regions = filter(lambda x: "intron" not in x.get_name()\
and "entire" not in x.get_name(),
fetch_regions())
for region in regions:
strand_key = STRAND_KEYS[region.spanning_segment.strand]
my_seq = region.get_sequence(genome)
# choose 5' read locations
read_locs = numpy.random.randint(
0, high=len(my_seq) - read_length + 1, size=reads_per_region
)
# generate FASTA File
# and save read positions to count vectors under various alignment mapping rules
for n, loc in enumerate(read_locs):
# write reads
read_fh.write(
">%s_%s\n%s\n" % (region.get_name(), n, my_seq[loc:loc + read_length])
)
_, position, _ = region.get_genomic_coordinate(loc)
# populate 5' and 3' mapped count vectors
for offset in (0, 15):
_, position, _ = region.get_genomic_coordinate(loc + offset)
count_vectors["fiveprime_%s_%s" % (offset, strand_key)][position] += 1
_, position, _ = region.get_genomic_coordinate(loc + read_length - offset - 1)
count_vectors["threeprime_%s_%s" % (offset, strand_key)][position] += 1
# populate center-mapped count vectors
read_positions = region.get_subchain(loc, loc + read_length).get_position_list()
assert len(read_positions) == read_length
for pos in read_positions:
count_vectors["center_0_%s" % strand_key][pos] += 1.0 / len(read_positions)
assert len(read_positions[12:-12]) == read_length - 24
for pos in read_positions[12:-12]:
count_vectors["center_12_%s" % strand_key][pos] += 1.0 / (
len(read_positions) - 24
)
for k, v in count_vectors.items():
numpy.savetxt(os.path.join(base_folder, "count_vectors", "%s.txt" % k), v)
# export 5' mapped BEDGraph files
with open(os.path.join(base_folder, _TEST_FILES["bedgraph_fw"]), "w") as bedgraph_fw:
_ndarray_to_bedgraph(count_vectors["fiveprime_0_fw"], bedgraph_fw, base_folder)
with open(os.path.join(base_folder, _TEST_FILES["bedgraph_rc"]), "w") as bedgraph_rc:
_ndarray_to_bedgraph(count_vectors["fiveprime_0_rc"], bedgraph_rc, base_folder)
# export 5' mapped variableStep wiggle files
with open(os.path.join(base_folder, _TEST_FILES["variable_step_fw"]), "w") as vs_fw:
_ndarray_to_variable_step(count_vectors["fiveprime_0_fw"], vs_fw, base_folder)
with open(os.path.join(base_folder, _TEST_FILES["variable_step_rc"]), "w") as vs_rc:
_ndarray_to_variable_step(count_vectors["fiveprime_0_rc"], vs_rc, base_folder)
return count_vectors
@detect_base_folder
def perform_alignments(
base_folder,
bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin"),
tophat_location=os.path.join(os.path.sep, "usr", "local", "bin"),
samtools_location=os.path.join(os.path.sep, "usr", "local", "bin"),
):
"""Perform alignments of generated reads against synthetic genome,
in both Tophat and Bowtie so that both BAM and bowtie input may be tested.
Note: spliced reads will not align in bowtie.
Read alignments will be placed in base_folder/align
Requires a bowtie index of the synthetic genome in base_folder/ebwt, and
synthetic reads to align in base_folder/fasta/chrA_reads.fa,
so run create_bowtie_index() and generate_reads() first.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
bowtie_location : str
path to folder containing bowtie executable
tophat_location : str
path to folder containing tophat executable
samtools_location : str
path to folder containing samtools executable
Returns
-------
int : ORed exit status of bowtie and tophat (0 if both successful; 1 otherwise)
"""
# align with no mismatches, choosing 1 random alignment from repeat regions
bowtie_args = [
os.path.join(bowtie_location, "bowtie"), "-v0", "-k1", "--best", "-f", "--un",
os.path.join(base_folder, "align", "chrA_unspliced_unaligned.fa"),
os.path.join(base_folder, _TEST_FILES["bowtie_index"]),
os.path.join(base_folder, _TEST_FILES["reads"]),
os.path.join(base_folder, _TEST_FILES["bowtie"])
]
# align with no mismatches, choosing 1 random alignment from repeat regions
tophat_args = [
os.path.join(bowtie_location, "tophat"),
"--bowtie1",
"--read-mismatches=0",
"--min-intron-length=20",
"--library-type=fr-firststrand",
"--raw-juncs",
os.path.join(base_folder, _TEST_FILES["juncs"]),
"--no-novel-juncs",
"-o",
os.path.join(base_folder, "align", "tophat"),
os.path.join(base_folder, _TEST_FILES["bowtie_index"]),
os.path.join(base_folder, _TEST_FILES["reads"]),
]
samtools_multi_args = [
os.path.join(samtools_location, "samtools"), "view", "-F", "256",
os.path.join(base_folder, "align", "tophat", "accepted_hits.bam"), "-b", "-o",
os.path.join(base_folder, "align", "chrA_tophat.bam")
]
samtools_index_args = [
os.path.join(samtools_location, "samtools"), "index",
os.path.join(base_folder, "align", "chrA_tophat.bam")
]
bowtie_exit = subprocess.call(bowtie_args)
tophat_exit = subprocess.call(tophat_args)
samtools_exit_1 = subprocess.call(samtools_multi_args)
samtools_exit_2 = subprocess.call(samtools_index_args)
return bowtie_exit | tophat_exit | samtools_exit_1 | samtools_exit_2
def create_dataset(
base_folder,
bowtie_location=os.path.join(os.path.sep, "usr", "local", "bin"),
tophat_location=os.path.join(os.path.sep, "usr", "local", "bin"),
samtools_location=os.path.join(os.path.sep, "usr", "local", "bin"),
):
"""Create a ground-truth dataset for testing |GenomeArray|
This dataset includes a synthetic genome of random sequence, containing
uniquely-mapping and multimapping regions; short sequence reads generated
from these regions; alignments of these reads made in bowtie and tophat;
and saved numpy tables indicating how many counts should appear at each
genomic position under various mapping rules and offsets.
Parameters
----------
base_folder : str
path to base folder in which test data will be created.
bowtie_location : str
path to folder containing bowtie executable
tophat_location : str
path to folder containing tophat executable
samtools_location : str
path to folder containing samtools executable
Returns
-------
dict: dictionary containing the genome sequence, count vectors,
and alignment status
"""
dtmp = {}
dtmp["base_folder"] = base_folder
dtmp["genome_str"] = create_synthetic_genome(base_folder)
dtmp["aligned"] = False
if create_bowtie_index(base_folder, bowtie_location=bowtie_location) == 0:
dtmp.update(generate_reads(base_folder))
if perform_alignments(base_folder, bowtie_location=bowtie_location,
tophat_location=tophat_location,
samtools_location=samtools_location) == 0:
dtmp["aligned"] = True
return dtmp
return dtmp
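# Minimal usage sketch (paths are assumptions; bowtie, tophat and samtools must be installed
# at the given locations for the alignment step to succeed):
#
# if __name__ == "__main__":
#     dataset = create_dataset("/tmp/plastid_test_data")
#     print("synthetic genome length:", len(dataset["genome_str"]))
#     print("alignments completed:", dataset["aligned"])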
|
the-stack_106_13964
|
"""
-------------------------------------------------------------------------
Example script for feature extraction and comparison of features
-------------------------------------------------------------------------
Here we show the results of different features calculated from the particle
positions or the images itself. This is to give an initial idea of what the
different features mean to us and how we can use them.
.. moduleauthor:: Michael Barbier <[email protected]>
"""
# -------------------------------------------------------------------------
# Import libraries
# -------------------------------------------------------------------------
import cv2 as cv
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
from scipy.spatial import Voronoi, voronoi_plot_2d
from scipy.spatial import Delaunay
from time import time
import argparse
# Personal modules
from mlpy import util, detection, mio, plots, patterns as pr
# Reload personal modules (see: https://docs.python.org/3.8/tutorial/modules.html)
import importlib
importlib.reload(util)
importlib.reload(mio)
importlib.reload(plots)
importlib.reload(detection)
importlib.reload(pr)
# Print the OpenCV version for debugging
print(cv.__version__)
# -------------------------------------------------------------------------
"""
# -------------------------------------------------------------------------
# Script
# -------------------------------------------------------------------------
"""
util.my_comment('Load image with crystal (hexagonal) and gaseous state')
data_folder = '../data/phase_states'
mask_folder = '../data/phase_states/mask'
image_name = 'quasi_1.png'
#image_name = 'hexa_gaseous.png'
im = cv.imread(os.path.join(data_folder, image_name), 0)
mask_gt = cv.imread(os.path.join(mask_folder, image_name), 0)
util.my_comment('Detection of the particles in the image')
saturation_perc = 0.1
radius = 2
is_dark = 0
circle_list, im_gray, im_norm, im_blur = detection.detection(im, "Laplace", saturation_perc, radius, is_dark)
im_circles = np.copy(im_gray)
radius_show = 4
plots.draw_circles(circle_list, im_circles, radius=radius_show)
# Plot the detected particles
plt.subplot(121), plt.imshow(im, cmap='gray')
plt.title('Original image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(im_circles)
plt.title('Detection'), plt.xticks([]), plt.yticks([])
#plt.show()
util.my_comment('Calculation of features')
util.my_sub_comment('Lindemann features')
lindemann_list_5 = pr.compute_lindemann_parameter(circle_list, 5*radius)
lindemann_list_10 = pr.compute_lindemann_parameter(circle_list, 10*radius)
lindemann_list_20 = pr.compute_lindemann_parameter(circle_list, 20*radius)
util.my_sub_comment('Voronoi diagram features')
circle_list_array = np.array(circle_list)
centers = circle_list_array[:, 0:2]
n_centers = centers.shape[0]
vor = pr.compute_voronoi_diagram(centers)
#print(vor.point_region)
max_area = 1000.0
min_length_ratio = 0.01
[point_list, vertices_list, inside_point_list, areas_list, n_neighbors_list] = pr.features_voronoi(im, vor, n_centers, max_area)
norm_vv_list, n_neighbors_norm_list = pr.normalized_sides_voronoi(vertices_list, min_length_ratio)
"""
ax1 = plt.subplot(111)
plt.imshow(im, cmap='gray')
plots.plot_polygons(vertices_list, areas_list, ax=ax1, alpha=0.2, linewidth=0.7)
ax2 = plt.subplot(111)
plt.imshow(im, cmap='gray')
plots.plot_polygons(vertices_list, n_neighbors_list, ax=ax2, alpha=0.2, linewidth=0.7)
ax3 = plt.subplot(111)
plt.imshow(im, cmap='gray')
plots.plot_polygons(vertices_list, inside_point_list, ax=ax3, alpha=0.2, linewidth=0.7)
ax4 = plt.subplot(111)
plt.imshow(im, cmap='gray')
plots.plot_polygons(vertices_list, n_neighbors_norm_list, ax=ax4, alpha=0.2, linewidth=0.7)
"""
#fig = voronoi_plot_2d(vor, show_points=True, show_vertices=True, s=4)
#plt.show()
util.my_sub_comment('Delaunay triangulation features')
neighbors_list, dd_list, dd_mean, dd_std, aa_list, aa_mean, aa_std = pr.features_delaunay(centers)
df = pd.DataFrame(
{
'index': pd.Series(range(len(centers))),
'center_x': pd.Series(centers[:, 0], dtype='float32'),
'center_y': pd.Series(centers[:, 1], dtype='float32'),
'valid_point': pd.Series(inside_point_list),
'area': pd.Series(areas_list, dtype='float32'),
'n_norm_NN': pd.Series(n_neighbors_norm_list),
'n_NN': pd.Series(n_neighbors_list),
'NN_distance_mean': pd.Series(dd_mean, dtype='float32'),
'NN_distance_std': pd.Series(dd_std, dtype='float32'),
'NN_angle_mean': pd.Series(aa_mean, dtype='float32'),
'NN_angle_std': pd.Series(aa_std, dtype='float32'),
'Lindemann_5': pd.Series(lindemann_list_5, dtype='float32'),
'Lindemann_10': pd.Series(lindemann_list_10, dtype='float32'),
'Lindemann_20': pd.Series(lindemann_list_20, dtype='float32'),
})
#def intensity_points(im, centers):
centers_int = np.array(centers, np.int32)
intensity_list = mask_gt[centers_int[:, 1], centers_int[:, 0]]
# return intensity_list
#plt.figure()
#df.plot()
#plt.legend(loc='best')
#plt.show()
a = 34
b = 45
"""
fig2 = plt.imshow(im, cmap='gray')
k = 100
#plt.triplot(centers[:, 0], centers[:, 1], tri.simplices)
plt.plot(centers[:, 0], centers[:, 1], 'o')
plt.plot(centers[k, 0], centers[k, 1], '*')
for nn in neighbors_list[k]:
plt.plot(centers[nn, 0], centers[nn, 1], '*')
plt.show()
"""
|
the-stack_106_13967
|
import discord
import random
import utils
from discord.ext import commands
from unsplash import Unsplash, Photo, UnsplashException
from typing import Optional, List
from aiohttp import ClientError
utm_params = '?utm_source=discord_bot_doggie_bot&utm_medium=referral'
async def get_pic(url: str, ctx: utils.CustomContext, key: str) -> str:
async with ctx.bot.session.get(url) as resp:
data = await resp.json()
return data[key]
async def furry_image(ctx, user: Optional[discord.User], endpoint: str, action: str, a2: str = None):
if not user or user == ctx.author:
msg = f'{ctx.author.mention} has no one to {action} :('
elif user.bot:
msg = f'{ctx.author.mention} tries to {action} a bot... sad :('
else:
msg = f'{ctx.author.mention} {action + "s" if not a2 else a2} {user.mention}!'
url = await get_pic(f'https://v2.yiff.rest/furry/{endpoint}', ctx, key='images')
return utils.create_embed(ctx.author, title=f'Furry {action}!', description=msg, image=url[0]['url'])
def check_unsplash():
def predicate(ctx):
if ctx.bot.config['unsplash_api_key']:
return True
raise utils.MissingAPIKey(
'The Unsplash API key is missing! '
'The owner of this bot can add an API key in `config.yaml`'
)
return commands.check(predicate)
class Random(commands.Cog):
"""Commands to get something random, like colors or images!"""
def __init__(self, bot):
self.bot: utils.CustomBot = bot
if bot.config['unsplash_api_key']:
self.unsplash = Unsplash(bot.config['unsplash_api_key'])
else:
self.unsplash = None
self.cached_random_photos: List[Photo] = []
@commands.group(invoke_without_command=True)
async def random(self, ctx: utils.CustomContext):
await ctx.send_help(ctx.command)
@random.command(aliases=['user'])
async def member(self, ctx: utils.CustomContext, include_bots=False):
"""Shows a random member from this server!"""
member = random.choice([m for m in ctx.guild.members if not m.bot or m.bot == include_bots])
embed = utils.create_embed(
ctx.author,
title='Random member from server!',
description=f'{member.mention} - (ID: {member.id})',
thumbnail=member.display_avatar
)
await ctx.send(embed=embed)
@random.command(aliases=['colour'])
async def color(self, ctx: utils.CustomContext):
"""Shows a random color!"""
alias = ctx.invoked_with.lower()
color = discord.Color.random()
buffer = await self.bot.loop.run_in_executor(None, utils.solid_color_image, color.to_rgb())
file = discord.File(filename="color.png", fp=buffer)
embed = utils.create_embed(
ctx.author,
title=f'Showing random {alias}:',
color=color,
thumbnail="attachment://color.png"
)
embed.add_field(name='Hex:', value=f'`{color}`')
embed.add_field(name='Int:', value=f'`{str(color.value).zfill(8)}`')
embed.add_field(name='RGB:', value=f'`{color.to_rgb()}`')
await ctx.send(file=file, embed=embed)
@commands.group(invoke_without_command=True)
async def unsplash(self, ctx: utils.CustomContext):
await ctx.send_help(ctx.command)
@check_unsplash()
@unsplash.command(name='random', aliases=['rdm'])
@commands.cooldown(10, 60, commands.BucketType.user)
async def rdm(self, ctx: utils.CustomContext):
"""Gets a random photo from the Unsplash API!"""
if not self.cached_random_photos:
self.cached_random_photos = await self.unsplash.random(content_filter='high', count=30)
image: Photo = self.cached_random_photos.pop(0)
description = f'"{image.description or image.alt_description}"\n\n' \
f'*Photo by [{image.user.name}](https://unsplash.com/@{image.user.username}{utm_params}) on ' \
f'[Unsplash](https://unsplash.com/{utm_params})*'
embed = utils.create_embed(
ctx.author,
title='Unsplash Image',
description=description,
image=image.urls.regular,
color=image.color,
timestamp=image.created_at
)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def fox(self, ctx: utils.CustomContext):
"""Gets a random fox from randomfox.ca"""
url = await get_pic('https://randomfox.ca/floof/', ctx, key='image')
embed = utils.create_embed(ctx.author, title=f'Random fox picture!:', image=url)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def duck(self, ctx: utils.CustomContext):
"""Gets a random duck from random-d.uk"""
url = await get_pic('https://random-d.uk/api/v2/quack', ctx, key='url')
embed = utils.create_embed(ctx.author, title=f'Random duck picture!:', image=url)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def dog(self, ctx: utils.CustomContext):
"""Gets a random dog from random.dog"""
url = await get_pic('https://random.dog/woof.json?filter=mp4', ctx, key='url')
embed = utils.create_embed(ctx.author, title=f'Random dog picture!:', image=url)
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def hug(self, ctx: utils.CustomContext, *, user: Optional[discord.User]):
"""Get picture of furries hugging, because why not?"""
embed = await furry_image(ctx, user, 'hug', 'hug')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def boop(self, ctx: utils.CustomContext, *, user: Optional[discord.User]):
"""Get picture of furries booping eachother, because why not?"""
embed = await furry_image(ctx, user, 'boop', 'boop')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def hold(self, ctx: utils.CustomContext, *, user: Optional[discord.User]):
"""Get picture of furries holding eachother, because why not?"""
embed = await furry_image(ctx, user, 'hold', 'hold')
await ctx.send(embed=embed)
@commands.command()
@commands.cooldown(10, 60, commands.BucketType.user)
async def kiss(self, ctx: utils.CustomContext, *, user: Optional[discord.User]):
"""Get picture of furries kissing, because why not?"""
embed = await furry_image(ctx, user, 'kiss', 'kiss', 'kisses')
await ctx.send(embed=embed)
@commands.command(aliases=['licc'])
@commands.cooldown(10, 60, commands.BucketType.user)
async def lick(self, ctx: utils.CustomContext, *, user: Optional[discord.User]):
"""Get picture of furries licking eachother, because why not?"""
embed = await furry_image(ctx, user, 'lick', 'lick')
await ctx.send(embed=embed)
async def cog_command_error(self, ctx: utils.CustomContext, error: Exception) -> None:
embed = None
if isinstance(error, commands.CommandInvokeError):
error = error.original
if isinstance(error, utils.MissingAPIKey):
embed = utils.create_embed(
ctx.author,
title='Bot missing API key!',
description=str(error),
color=discord.Color.red()
)
if isinstance(error, (UnsplashException, ClientError)):
embed = utils.create_embed(
ctx.author,
title='Error while using api!',
description='For some reason an error happened, maybe the API is down?',
color=discord.Color.red()
)
if embed:
return await ctx.send(embed=embed)
ctx.uncaught_error = True
def setup(bot):
bot.add_cog(Random(bot))
|
the-stack_106_13969
|
from rdflib import URIRef, Graph, Literal, BNode
from shexer.model.graph.abstract_sgraph import SGraph
from shexer.utils.triple_yielders import tune_token, tune_prop, tune_subj
from shexer.utils.uri import add_corners_if_it_is_an_uri, remove_corners
from shexer.core.instances.pconsts import _S, _P, _O
from shexer.model.IRI import IRI as ModelIRI
from shexer.model.property import Property as ModelProperty
from shexer.model.Literal import Literal as ModelLiteral
from shexer.model.bnode import BNode as ModelBnode
class RdflibSgraph(SGraph):
def __init__(self, rdflib_graph=None, source_file=None, raw_graph=None, format="turtle"):
"""
Pass an rdflib.Graph object or the params source_file and format to parse a local rdf
:param rdflib_graph:
:param source_file:
:param format:
"""
super().__init__()
self._rdflib_graph = rdflib_graph if rdflib_graph is not None else self._build_rdflib_graph(source=source_file,
raw_graph=raw_graph,
format=format)
def query_single_variable(self, str_query, variable_id):
rows_res = self._rdflib_graph.query(str_query)
return [str(a_row[0]) for a_row in rows_res]
def serialize(self, path, format):
self._rdflib_graph.serialize(destination=path,
format=format)
def yield_p_o_triples_of_an_s(self, target_node):
for s, p ,o in self._rdflib_graph.triples((URIRef(remove_corners(a_uri=target_node,
raise_error_if_no_corners=False)),
None,
None)):
yield str(s), str(p), self._add_lang_if_needed(o)
def yield_class_triples_of_an_s(self, target_node, instantiation_property):
for s ,p, o in self._rdflib_graph.triples((URIRef(remove_corners(a_uri=target_node,
raise_error_if_no_corners=False)),
URIRef(remove_corners(a_uri=instantiation_property,
raise_error_if_no_corners=False)),
None)):
yield str(s), str(p), self._add_lang_if_needed(o)
def add_triple(self, a_triple):
"""
It receives a tuple of 3 string elements. It adds it to the local rdflib graph
:param a_triple:
:return:
"""
subj = tune_subj(add_corners_if_it_is_an_uri(a_triple[_S]),
raise_error_if_no_corners=False)
prop = tune_prop(add_corners_if_it_is_an_uri(a_triple[_P]),
raise_error_if_no_corners=False)
obj = tune_token(add_corners_if_it_is_an_uri(a_triple[_O]),
raise_error_if_no_corners=False)
self._rdflib_graph.add((self._turn_obj_into_rdflib_element(subj),
self._turn_obj_into_rdflib_element(prop),
self._turn_obj_into_rdflib_element(obj)))
def _turn_obj_into_rdflib_element(self, model_elem):
if type(model_elem) == ModelIRI or type(model_elem) == ModelProperty:
return URIRef(model_elem.iri)
elif type(model_elem) == ModelLiteral:
return Literal(lexical_or_value=str(model_elem),
datatype=model_elem.elem_type)
elif type(model_elem) == ModelBnode:
return BNode(value=str(model_elem))
else:
raise ValueError("Unexpected type of element. " + str(model_elem) + ": " + str(type(model_elem)))
def _build_rdflib_graph(self, source, raw_graph, format):
result = Graph()
if source is not None:
result.parse(source=source, format=format)
else:
result.parse(data=raw_graph, format=format)
return result
def _add_lang_if_needed(self, rdflib_obj):
"""
It return a string representation with lang if it is a langString
:param rdflib_obj:
:return:
"""
result = str(rdflib_obj)
if type(rdflib_obj) == Literal and rdflib_obj.language is not None:
result = '"' + result + '"@' + rdflib_obj.language
return result
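# Illustrative usage sketch (the turtle snippet is an assumption, not shexer test data):
#
# _raw = """
# @prefix ex: <http://example.org/> .
# ex:alice ex:knows ex:bob .
# """
# _sgraph = RdflibSgraph(raw_graph=_raw, format="turtle")
# for _s, _p, _o in _sgraph.yield_p_o_triples_of_an_s("<http://example.org/alice>"):
#     print(_s, _p, _o)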
|
the-stack_106_13970
|
import glob
import math
import os
import random
import shutil
from pathlib import Path
from PIL import Image
# import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
# try:
# from utils.utils import xyxy2xywh, xywh2xyxy
# except:
# from utils import xyxy2xywh, xywh2xyxy
def xyxy2xywh(x):
# Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2
y[:, 1] = (x[:, 1] + x[:, 3]) / 2
y[:, 2] = x[:, 2] - x[:, 0]
y[:, 3] = x[:, 3] - x[:, 1]
return y
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2
y[:, 1] = x[:, 1] - x[:, 3] / 2
y[:, 2] = x[:, 0] + x[:, 2] / 2
y[:, 3] = x[:, 1] + x[:, 3] / 2
return y
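# Quick sanity sketch (illustrative only, not called by the loaders below): the two
# converters are inverses of each other for well-formed boxes.
def _example_box_roundtrip():
    box = np.array([[10.0, 20.0, 30.0, 60.0]], dtype=np.float32)
    assert np.allclose(xywh2xyxy(xyxy2xywh(box)), box)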
class LoadImages: # for inference
def __init__(self, path, img_size=416):
self.height = img_size
img_formats = ['.jpg', '.jpeg', '.png', '.tif']
vid_formats = ['.mov', '.avi', '.mp4']
files = []
if os.path.isdir(path):
files = sorted(glob.glob('%s/*.*' % path))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in ' + path
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'File Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, img_size=416):
self.cam = cv2.VideoCapture(0)
self.height = img_size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == 27: # esc to quit
cv2.destroyAllWindows()
raise StopIteration
# Read image
ret_val, img0 = self.cam.read()
assert ret_val, 'Webcam Error'
img_path = 'webcam_%g.jpg' % self.count
img0 = cv2.flip(img0, 1) # flip left-right
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return img_path, img, img0, self.cam
def __len__(self):
return 0
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, batch_size, img_size=416, augment=True, multi_scale=False):
print('LoadImagesAndLabels init : ',path)
with open(path, 'r') as file:
img_files = file.read().splitlines()
img_files = list(filter(lambda x: len(x) > 0, img_files))
np.random.shuffle(img_files) # shuffle img_list
print("shuffle image...")
self.img_files = img_files
assert len(self.img_files) > 0, 'No images found in %s' % path
self.img_size = img_size
self.batch_size = batch_size
self.multi_scale = multi_scale
self.augment = augment
self.scale_index = 0
if self.multi_scale:
self.img_size = 608 # initiate with maximum multi_scale size, in case of out of memory
print("Multi scale images training, init img_size", self.img_size)
else:
print("Fixed scale images, img_size", self.img_size)
self.label_files = [
x.replace('images', 'labels').replace("JPEGImages", 'labels').replace('.bmp', '.txt').replace('.jpg', '.txt').replace('.png', '.txt')
for x in self.img_files]
# print('self.img_files : ',self.img_files[1])
# print('self.label_files : ',self.label_files[1])
def __len__(self):
return len(self.img_files)
def __getitem__(self, index):
# if self.multi_scale and (index % self.batch_size == 0) and index != 0:
if self.multi_scale and (self.scale_index % self.batch_size == 0)and self.scale_index != 0:
self.img_size = random.choice(range(11, 16)) * 32
# print("++++++ change img_size, index:", self.img_size, index)
if self.multi_scale:
self.scale_index += 1
if self.scale_index >= (100*self.batch_size):
self.scale_index = 0
img_path = self.img_files[index]
label_path = self.label_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'File Not Found ' + img_path
augment_hsv = random.random() < 0.5 # hsv_aug prob = 0.5
if self.augment and augment_hsv:
# SV augmentation by 50%
fraction = 0.50 # must be < 1.0
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * fraction + 1 # a in [0.5, 1.5]
S *= a
if a > 1:
np.clip(S, None, 255, out=S)
a = (random.random() * 2 - 1) * fraction + 1
V *= a
if a > 1:
np.clip(V, None, 255, out=V)
img_hsv[:, :, 1] = S # .astype(np.uint8)
img_hsv[:, :, 2] = V # .astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=self.img_size, augment=self.augment)
# Load labels
labels = []
if os.path.isfile(label_path):
with open(label_path, 'r') as file:
lines = file.read().splitlines()
x = np.array([x.split() for x in lines], dtype=np.float32)
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio * w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = ratio * h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = ratio * w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = ratio * h * (x[:, 2] + x[:, 4] / 2) + padh
# Augment image and labels
if self.augment:
img, labels = random_affine(img, labels, degrees=(-10, 10), translate=(0.10, 0.10), scale=(0.9, 1.1))
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) / self.img_size # convert to xywh format and normalize to [0, 1]
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() > 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() > 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6)) # extra leading column holds the image index within the batch
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Normalize
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img, dtype=np.float32) # uint8 to float32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
return torch.from_numpy(img), labels_out, img_path, (h, w)
@staticmethod
def collate_fn(batch):
img, label, path, hw = list(zip(*batch)) # transposed
for i, l in enumerate(label):
l[:, 0] = i # record which image in the batch each object belongs to
return torch.stack(img, 0), torch.cat(label, 0), path, hw
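# Hedged usage sketch (the list-file path is an assumption): collate_fn is what lets each
# batch carry a variable number of labels, with column 0 of every label row holding the
# index of the image it belongs to.
#
# dataset = LoadImagesAndLabels('data/train.txt', batch_size=8, img_size=416, augment=True)
# loader = DataLoader(dataset, batch_size=8, shuffle=True,
#                     collate_fn=LoadImagesAndLabels.collate_fn)
# for imgs, targets, paths, shapes in loader:
#     pass  # imgs: (8, 3, H, W), targets: (n_objects, 6)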
def letterbox(img, height=416, augment=False, color=(127.5, 127.5, 127.5)):
# Resize a rectangular image to a padded square
shape = img.shape[:2] # shape = [height, width]
ratio = float(height) / max(shape) # ratio = old / new
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))
dw = (height - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
# resize img
if augment:
interpolation = np.random.choice([None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
None, cv2.INTER_NEAREST, cv2.INTER_LINEAR,
cv2.INTER_AREA, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4])
if interpolation is None:
img = cv2.resize(img, new_shape)
else:
img = cv2.resize(img, new_shape, interpolation=interpolation)
else:
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST)
# print("resize time:",time.time()-s1)
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square
return img, ratio, dw, dh
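# Illustrative check (the input shape is an assumption): letterbox scales the longer side to
# `height` and pads the shorter side so the output is square.
def _example_letterbox():
    frame = np.zeros((360, 640, 3), dtype=np.uint8)
    padded, ratio, dw, dh = letterbox(frame, height=416)
    assert padded.shape[:2] == (416, 416)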
def random_affine(img, targets=(), degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
if targets is None:
targets = []
border = 0 # width of added border (optional)
height = max(img.shape[0], img.shape[1]) + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(height, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 1:5].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction of bounding boxes
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
np.clip(xy, 0, height, out=xy)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return imw, targets
def convert_images2bmp():
# cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
folder = os.sep + Path(path).name
output = path.replace(folder, folder + 'bmp')
if os.path.exists(output):
shutil.rmtree(output) # delete output folder
os.makedirs(output) # make new output folder
for f in tqdm(glob.glob('%s*.jpg' % path)):
save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
cv2.imwrite(save_name, cv2.imread(f))
for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
with open(label_path, 'r') as file:
lines = file.read()
lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
'/Users/glennjocher/PycharmProjects/', '../')
with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
file.write(lines)
|
the-stack_106_13971
|
from ..schema import EBMLDocument, UnknownElement, CONTAINER, BINARY
def dump_element(element, indent=0):
if isinstance(element, UnknownElement):
print(('\t' * indent) + ('<Unknown id=\'%s\' bytes=\'%i\' />' % (hex(element.id), element.body_size)))
else:
sargs = {
'name': element.name,
'bytes': element.body_size,
'value': element.value
}
def print_indented(foo):
print(('\t' * indent) + foo)
if element.type == CONTAINER:
print_indented('<%(name)s>' % sargs)
for sub_el in element.value:
dump_element(sub_el, indent + 1)
print_indented('</%(name)s>' % sargs)
elif element.type == BINARY:
print_indented('<%(name)s bytes=\'%(bytes)i\' />' % sargs)
else:
print_indented('<%(name)s>%(value)s</%(name)s>' % sargs)
def dump_document(document):
for el in document.roots:
dump_element(el)
if __name__ == '__main__':
import sys
from optparse import OptionParser
parser = OptionParser(usage='Usage: %prog [OPTION] FILE')
parser.add_option('--document-class', dest='document_class', help='the document class to use', metavar='CLASS')
options, args = parser.parse_args()
if options.document_class is None:
class doc_cls(EBMLDocument):
type = None
version = None
else:
mod_name, _, cls_name = options.document_class.rpartition('.')
try:
doc_mod = __import__(mod_name, fromlist=[cls_name])
doc_cls = getattr(doc_mod, cls_name)
except ImportError:
parser.error('unable to import module %s' % mod_name)
except AttributeError:
parser.error('unable to import class %s from %s' % (cls_name, mod_name))
if not args:
parser.error('no file provided')
elif len(args) > 1:
parser.error('more than one file provided')
with open(args[0], 'rb') as stream:
doc = doc_cls(stream)
dump_document(doc)
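# Example invocation (the module path, document class and file name are assumptions;
# substitute the EBMLDocument subclass matching your schema):
#
#   python -m ebml.tests.dump --document-class ebml.schema.matroska.MatroskaDocument movie.mkv
#
# which prints an indented XML-like tree of the EBML elements found in the file.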
|
the-stack_106_13973
|
listanum = [[], []] # first inner list holds the even numbers, the second holds the odd numbers
valor = 0
for cont in range(1, 8):
valor = int(input("Enter a value: "))
if valor % 2 == 0:
listanum[0].append(valor)
else:
listanum[1].append(valor)
listanum[0].sort()
listanum[1].sort()
print(f"The even numbers entered were {listanum[0]}")
print(f"The odd numbers entered were {listanum[1]}")
|
the-stack_106_13975
|
import numpy as np
import pytest
from aesara import shared
@pytest.mark.parametrize(
"rng", [np.random.RandomState(123), np.random.default_rng(123)]
)
def test_GeneratorSharedVariable(rng):
s_rng_default = shared(rng)
s_rng_True = shared(rng, borrow=True)
s_rng_False = shared(rng, borrow=False)
# test borrow contract: that False means a copy must have been made
assert s_rng_default.container.storage[0] is not rng
assert s_rng_False.container.storage[0] is not rng
# test current implementation: that True means a copy was not made
assert s_rng_True.container.storage[0] is rng
# ensure that all the random number generators are in the same state
if hasattr(rng, "randn"):
v = rng.randn()
v0 = s_rng_default.container.storage[0].randn()
v1 = s_rng_False.container.storage[0].randn()
else:
v = rng.standard_normal()
v0 = s_rng_default.container.storage[0].standard_normal()
v1 = s_rng_False.container.storage[0].standard_normal()
assert v == v0 == v1
@pytest.mark.parametrize(
"rng", [np.random.RandomState(123), np.random.default_rng(123)]
)
def test_get_value_borrow(rng):
s_rng = shared(rng)
r_ = s_rng.container.storage[0]
r_T = s_rng.get_value(borrow=True)
r_F = s_rng.get_value(borrow=False)
# the contract requires that borrow=False returns a copy
assert r_ is not r_F
# the current implementation allows for True to return the real thing
assert r_ is r_T
# either way, the rngs should all be in the same state
if hasattr(rng, "rand"):
assert r_.rand() == r_F.rand()
else:
assert r_.standard_normal() == r_F.standard_normal()
@pytest.mark.parametrize(
"rng", [np.random.RandomState(123), np.random.default_rng(123)]
)
def test_get_value_internal_type(rng):
s_rng = shared(rng)
# there is no special behaviour required of return_internal_type
# this test just ensures that the flag doesn't screw anything up
# by repeating the get_value_borrow test.
r_ = s_rng.container.storage[0]
r_T = s_rng.get_value(borrow=True, return_internal_type=True)
r_F = s_rng.get_value(borrow=False, return_internal_type=True)
# the contract requires that borrow=False returns a copy
assert r_ is not r_F
# the current implementation allows for True to return the real thing
assert r_ is r_T
# either way, the rngs should all be in the same state
if hasattr(rng, "rand"):
assert r_.rand() == r_F.rand()
else:
assert r_.standard_normal() == r_F.standard_normal()
@pytest.mark.parametrize("rng_ctor", [np.random.RandomState, np.random.default_rng])
def test_set_value_borrow(rng_ctor):
s_rng = shared(rng_ctor(123))
new_rng = rng_ctor(234234)
# Test the borrow contract is respected:
# assigning with borrow=False makes a copy
s_rng.set_value(new_rng, borrow=False)
assert new_rng is not s_rng.container.storage[0]
if hasattr(new_rng, "randn"):
assert new_rng.randn() == s_rng.container.storage[0].randn()
else:
assert new_rng.standard_normal() == s_rng.container.storage[0].standard_normal()
# Test that the current implementation is actually borrowing when it can.
rr = rng_ctor(33)
s_rng.set_value(rr, borrow=True)
assert rr is s_rng.container.storage[0]
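# Hedged aside (not part of the original tests): borrow=True matters because a borrowed
# generator and the shared variable then share state.
#
# rng = np.random.default_rng(0)
# s = shared(rng, borrow=True)
# rng.standard_normal()              # also advances the state stored inside `s`
# assert s.get_value(borrow=True) is rng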
|
the-stack_106_13977
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Oct 24, 2014
@author: Javier Garcia, [email protected]
'''
import sqlite3
import pandas as pd
import sqlalchemy
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
from data import DataHandler
from event import MarketEvent
from pprint import pprint
# pylint: disable=too-many-instance-attributes
# Eight is reasonable in this case.
class HistoricSQLiteDataHandler(DataHandler):
"""
HistoricSQLiteDataHandler is designed to read a SQLite database for
each requested symbol from disk and provide an interface
to obtain the "latest" bar in a manner identical to a live
trading interface.
ARG:
database_path: the system path where the SQLite database is stored.
symbol_list: list containing the name of the symbols
to read in the database.
The SQL consult is expected to have the following structure:
[date-time, open, high, low, close, volume, adjusted_close]
IMPORTANT: for different symbols in different time zones you must ensure
the data is correctly synchronized. Athena does not check this.
"""
def __init__(self, events, database, symbol_list):
"""
Initialises the historic data handler by requesting
the location of the database and a list of symbols.
It will be assumed that all price data is in a table called
'symbols', where the field 'symbol' is a string in the list.
Parameters:
events - The Event Queue.
csv_dir - Absolute directory path to the database
symbol_list - A list of symbol strings.
"""
self.events = events
self.database = database
self.symbol_list = symbol_list
self.symbol_data = {}
self.latest_symbol_data = {}
self.continue_backtest = True
self.bar_index = 0
self.all_data_dic = {} # access data in list form for testing
self._open_convert_database_data()
def _connect_to_database(self, database, flavor='sqlite3'):
"""
Connect to the database ....
:param database: full path to SQLite3 database to connect
"""
if flavor == 'sqlite3':
try:
connection = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
return connection
except sqlite3.Error as err:
print('Error connecting database', err.args[0])
# TODO: this leg is not finished
elif flavor == 'SQLAlchemy':
try:
engine = sqlalchemy.create_engine('sqlite://'+database)
return engine
except sqlalchemy.exc as err:
print('Error connecting database', err)
def _get_prices(self, conn, symbol, cols):
"""
Query the database and returns a dataframe with the chosen 7 columns.
:param conn:
:param symbol:
:param cols:
"""
values_qry = '''SELECT {},{},{},{},{},{},{}
FROM prices WHERE symbol="{}"'''.format(cols[0],
cols[1],
cols[2],
cols[3],
cols[4],
cols[5],
cols[6],
symbol)
return pd.read_sql(values_qry, conn, index_col='price_date')
def _open_convert_database_data(self):
"""
Opens the CSV files from the data directory, converting
them into pandas DataFrames within a symbol dictionary.
For this handler it will be assumed that the data is
taken from DTN IQFeed. Thus its format will be respected.
"""
comb_index = None
columns = ['price_date',
'open_price',
'high_price',
'low_price',
'close_price',
'volume',
'adjusted_price']
connection = self._connect_to_database(self.database)
for symbol in self.symbol_list:
self.symbol_data[symbol] = self._get_prices(connection, symbol, columns)
# Combine the index to pad forward values
if comb_index is None:
comb_index = self.symbol_data[symbol].index
else:
comb_index.union(self.symbol_data[symbol].index)
# Initialise the latest symbol data as an empty list
self.latest_symbol_data[symbol] = []
# Reindex the dataframes
for symbol in self.symbol_list:
self.all_data_dic[symbol] = self.symbol_data[symbol].\
reindex(index=comb_index, method=None)
self.symbol_data[symbol] = self.symbol_data[symbol].\
reindex(index=comb_index, method=None).iterrows()
def _get_new_bar(self, symbol):
"""
Returns the latest bar from the data feed.
"""
for symbol_gen in self.symbol_data[symbol]:
yield symbol_gen
def get_latest_bar(self, symbol):
"""
Returns the last bar from the latest_symbol list.
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
raise KeyError("Symbol is not available in the data set.")
else:
if not bars_list:
raise KeyError('latest_symbol_data has not been initialized.')
else:
return bars_list[-1]
def get_latest_bars(self, symbol, bars=1):
"""
Returns the last N bars from the latest_symbol list,
or N-k if less available.
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
raise KeyError("Symbol is not available in the data set.")
else:
if not bars_list:
raise KeyError('latest_symbol_data has not been initialized.')
else:
return bars_list[-bars:]
def get_latest_bar_datetime(self, symbol):
"""
Returns a Python datetime object for the last bar.
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
raise KeyError("Symbol is not available in the data set.")
else:
if not bars_list:
raise KeyError ('latest_symbol_data has not been initialized.')
else:
return bars_list[-1][0]
def get_latest_bar_value(self, symbol, val_type):
"""
Returns one of the Open, High, Low, Close, Volume or OI
values from the pandas Bar series object.
"""
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
raise KeyError("Symbol is not available in the data set.")
else:
if not bars_list:
raise KeyError ('latest_symbol_data has not been initialized.')
else:
return getattr(bars_list[-1][1], val_type)
def get_latest_bars_values(self, symbol, val_type, bars=1):
"""
Returns the last N bar values from the
latest_symbol list, or N-k if less available.
"""
try:
bars_list = self.get_latest_bars(symbol, bars)
except KeyError:
raise KeyError("Symbol is not available in the data set.")
else:
if not bars_list:
raise KeyError ('latest_symbol_data has not been initialized.')
else:
return np.array([getattr(b[1], val_type) for b in bars_list])
def update_bars(self):
"""
Pushes the latest bar to the latest_symbol_data structure
for all symbols in the symbol list.
"""
for symbol in self.symbol_list:
try:
bars = self._get_new_bar(symbol).__next__()
except StopIteration:
self.continue_backtest = False
else:
if bars is not None:
self.latest_symbol_data[symbol].append(bars)
self.events.put(MarketEvent())
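# Hedged illustration of the table layout this handler expects (column names are taken from
# _open_convert_database_data above; the CREATE TABLE statement itself is an assumption):
#
# CREATE TABLE prices (
#     symbol          TEXT,
#     price_date      TIMESTAMP,
#     open_price      REAL,
#     high_price      REAL,
#     low_price       REAL,
#     close_price     REAL,
#     volume          INTEGER,
#     adjusted_price  REAL
# );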
#
#
#
# import queue
# if __name__ == '__main__':
# from_location = 'PC'
# if from_location == 'PC':
# DATABASE = 'C:/Users/javgar119/Documents/Dropbox/SEC_MASTER/securities_master.SQLITE'
# elif from_location == 'MAC':
# DATABASE = '//Users/Javi/Dropbox/MarketDB/securities_master.SQLITE'
#
# LIST_OF_SYMBOLS = ['SPY_QL', 'EWA_QL']
# EVENT = queue.Queue()
# STORE = HistoricSQLiteDataHandler(EVENT, DATABASE, LIST_OF_SYMBOLS)
#
# print(type(STORE))
#
#
#
# for dummy in range(5483):
# STORE.update_bars()
# # DATETIME1 = STORE.get_latest_bar_datetime('SPY_QL')
# # SPY = STORE.get_latest_bar_value('SPY_QL', val_type='close_price')
# bar_value = STORE.get_latest_bar('SPY_QL')
# # DATETIME2 = STORE.get_latest_bar_datetime('EWA_QL')
# # EWA = STORE.get_latest_bar_value('EWA_QL', val_type='close_price')
# print(bar_value)
# # print(DATETIME1, SPY, ' - ', DATETIME2, EWA)
#
#
#
#
# def _get_prices(conn, symbols, cols):
# """
#
# :param conn:
# :param symbol:
# :param cols:
# """
# symbol_data = dict()
# for each_symbol in symbols:
# values_qry = '''SELECT {},{},{},{},{},{},{}
# FROM prices WHERE symbol="{}"'''.format(cols[0],
# cols[1],
# cols[2],
# cols[3],
# cols[4],
# cols[5],
# cols[6],
# each_symbol)
# symbol_data[each_symbol] = pd.read_sql(values_qry,
# conn,
# index_col='price_date')
# return symbol_data
#
#
# def _connect_to_database(database, flavor='sqlite3'):
# """
# Connect to the database ....
# :param database: full path to SQLite3 database to connect
# """
# if flavor == 'sqlite3':
# try:
# connection = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
# return connection
# except sqlite3.Error as err:
# print('Error connecting database', err.args[0])
# # TODO: this leg is not finished
# elif flavor == 'SQLAlchemy':
# try:
# engine = sqlalchemy.create_engine('sqlite://' + database)
# return engine
# except sqlalchemy.exc as err:
# print('Error connecting database', err)
#
#
#
#
#
# if __name__ == '__main__':
# from_location = 'PC'
# if from_location == 'PC':
# database = 'C:/Users/javgar119/Documents/Dropbox/SEC_MASTER/securities_master.SQLITE'
# elif from_location == 'MAC':
# database = '//Users/Javi/Dropbox/MarketDB/securities_master.SQLITE'
#
# conn = _connect_to_database(database)
#
# symbols = ['SP500_QL', 'USO_QL']
# columns = ['price_date', 'open_price', 'high_price', 'low_price', 'close_price', 'volume', 'adjusted_price']
# symbol_data = _get_prices(conn, symbols, columns)
|
the-stack_106_13978
|
import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID
ec_key = ec.generate_private_key(ec.SECP256K1, default_backend())
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"hendrix-tls-example"),
])
cert = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
ec_key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow()
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=10)
).add_extension(
x509.SubjectAlternativeName([x509.DNSName(u"localhost")]),
critical=False,
).sign(ec_key, hashes.SHA256(), default_backend())
with open("ec-key.pem", "wb") as f:
f.write(ec_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
))
with open("ec-certificate.pem", "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
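# Optional round-trip check (not part of the original script): reload the two
# PEM files written above with the matching cryptography loaders; this assumes
# they were just created in the current working directory.
with open("ec-key.pem", "rb") as f:
    reloaded_key = serialization.load_pem_private_key(
        f.read(), password=None, backend=default_backend())
with open("ec-certificate.pem", "rb") as f:
    reloaded_cert = x509.load_pem_x509_certificate(f.read(), default_backend())
# The reloaded certificate should show the subject and validity window set above.
print(reloaded_cert.subject, reloaded_cert.not_valid_after)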
|
the-stack_106_13979
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.text import slugify
from django.utils import timezone
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
def validate_date(date, field_name):
"""
Confirm that a date is not in the future.
:param date: a timezone aware date instance.
:param field_name: the name of the field being checked.
:return:
"""
if date and date > timezone.localdate():
raise ValidationError(
{field_name: _('Date can not be in the future.')},
code='date_invalid')
def validate_duration(model, max_duration=timedelta(hours=24)):
"""
Basic sanity checks for models with a duration
:param model: a model instance with 'start' and 'end' attributes
:param max_duration: maximum allowed duration between start and end time
:return:
"""
if model.start and model.end:
if model.start > model.end:
raise ValidationError(
_('Start time must come before end time.'),
code='end_before_start')
if model.end - model.start > max_duration:
raise ValidationError(_('Duration too long.'), code='max_duration')
def validate_unique_period(queryset, model):
"""
Confirm that model's start and end date do not intersect with other
instances.
:param queryset: a queryset of instances to check against.
:param model: a model instance with 'start' and 'end' attributes
:return:
"""
if model.id:
queryset = queryset.exclude(id=model.id)
if model.start and model.end:
if queryset.filter(start__lt=model.end, end__gt=model.start):
raise ValidationError(
_('Another entry intersects the specified time period.'),
code='period_intersection')
def validate_time(time, field_name):
"""
Confirm that a time is not in the future.
:param time: a timezone aware datetime instance.
:param field_name: the name of the field being checked.
:return:
"""
if time and time > timezone.localtime():
raise ValidationError(
{field_name: _('Date/time can not be in the future.')},
code='time_invalid')
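# Illustrative-only sketch (not part of the original module): how the
# validators above behave for a timestamp in the future. The helper name is
# hypothetical; the failure code comes from validate_time() above.
def _validator_usage_example():
    future = timezone.localtime() + timedelta(hours=1)
    try:
        validate_time(future, 'time')
    except ValidationError as error:
        return error  # raised with code 'time_invalid'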
class Child(models.Model):
model_name = 'child'
first_name = models.CharField(max_length=255, verbose_name=_('First name'))
last_name = models.CharField(max_length=255, verbose_name=_('Last name'))
birth_date = models.DateField(
blank=False,
null=False,
verbose_name=_('Birth date')
)
slug = models.SlugField(
allow_unicode=True,
blank=False,
editable=False,
max_length=100,
unique=True,
verbose_name=_('Slug')
)
picture = models.ImageField(
blank=True,
null=True,
upload_to='child/picture/',
verbose_name=_('Picture')
)
objects = models.Manager()
cache_key_count = 'core.child.count'
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['last_name', 'first_name']
verbose_name = _('Child')
verbose_name_plural = _('Children')
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
def save(self, *args, **kwargs):
self.slug = slugify(self, allow_unicode=True)
super(Child, self).save(*args, **kwargs)
cache.set(self.cache_key_count, Child.objects.count(), None)
def delete(self, using=None, keep_parents=False):
super(Child, self).delete(using, keep_parents)
cache.set(self.cache_key_count, Child.objects.count(), None)
def name(self, reverse=False):
if reverse:
return '{}, {}'.format(self.last_name, self.first_name)
return '{} {}'.format(self.first_name, self.last_name)
@classmethod
def count(cls):
""" Get a (cached) count of total number of Child instances. """
return cache.get_or_set(cls.cache_key_count, Child.objects.count, None)
class DiaperChange(models.Model):
model_name = 'diaperchange'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='diaper_change',
verbose_name=_('Child')
)
time = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('Time')
)
wet = models.BooleanField(verbose_name=_('Wet'))
solid = models.BooleanField(verbose_name=_('Solid'))
color = models.CharField(
blank=True,
choices=[
('black', _('Black')),
('brown', _('Brown')),
('green', _('Green')),
('yellow', _('Yellow')),
],
max_length=255,
verbose_name=_('Color')
)
amount = models.FloatField(blank=True, null=True, verbose_name=_('Amount'))
notes = models.TextField(blank=True, null=True, verbose_name=_('Notes'))
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-time']
verbose_name = _('Diaper Change')
verbose_name_plural = _('Diaper Changes')
def __str__(self):
return str(_('Diaper Change'))
def attributes(self):
attributes = []
if self.wet:
attributes.append(self._meta.get_field('wet').verbose_name)
if self.solid:
attributes.append(self._meta.get_field('solid').verbose_name)
if self.color:
attributes.append(self.get_color_display())
return attributes
def clean(self):
validate_time(self.time, 'time')
# One or both of Wet and Solid is required.
if not self.wet and not self.solid:
raise ValidationError(
_('Wet and/or solid is required.'), code='wet_or_solid')
class Feeding(models.Model):
model_name = 'feeding'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='feeding',
verbose_name=_('Child')
)
start = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('Start time')
)
end = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('End time')
)
duration = models.DurationField(
editable=False,
null=True,
verbose_name=_('Duration')
)
type = models.CharField(
choices=[
('breast milk', _('Breast milk')),
('formula', _('Formula')),
('fortified breast milk', _('Fortified breast milk')),
('solid food', _('Solid food')),
],
max_length=255,
verbose_name=_('Type')
)
method = models.CharField(
choices=[
('bottle', _('Bottle')),
('left breast', _('Left breast')),
('right breast', _('Right breast')),
('both breasts', _('Both breasts')),
('parent fed', _('Parent fed')),
('self fed', _('Self fed')),
],
max_length=255,
verbose_name=_('Method')
)
amount = models.FloatField(blank=True, null=True, verbose_name=_('Amount'))
notes = models.TextField(blank=True, null=True, verbose_name=_('Notes'))
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-start']
verbose_name = _('Feeding')
verbose_name_plural = _('Feedings')
def __str__(self):
return str(_('Feeding'))
def save(self, *args, **kwargs):
if self.start and self.end:
self.duration = self.end - self.start
super(Feeding, self).save(*args, **kwargs)
def clean(self):
validate_time(self.start, 'start')
validate_time(self.end, 'end')
validate_duration(self)
validate_unique_period(Feeding.objects.filter(child=self.child), self)
class Note(models.Model):
model_name = 'note'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='note',
verbose_name=_('Child')
)
note = models.TextField(verbose_name=_('Note'))
time = models.DateTimeField(
default=timezone.now,
blank=False,
verbose_name=_('Time')
)
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-time']
verbose_name = _('Note')
verbose_name_plural = _('Notes')
def __str__(self):
return str(_('Note'))
class NapsManager(models.Manager):
def get_queryset(self):
qs = super(NapsManager, self).get_queryset()
return qs.filter(id__in=[obj.id for obj in qs if obj.nap])
class Sleep(models.Model):
model_name = 'sleep'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='sleep',
verbose_name=_('Child')
)
napping = models.BooleanField(
editable=False,
null=True,
verbose_name=_('Napping')
)
start = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('Start time')
)
end = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('End time')
)
duration = models.DurationField(
editable=False,
null=True,
verbose_name=_('Duration')
)
notes = models.TextField(blank=True, null=True, verbose_name=_('Notes'))
objects = models.Manager()
naps = NapsManager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-start']
verbose_name = _('Sleep')
verbose_name_plural = _('Sleep')
def __str__(self):
return str(_('Sleep'))
@property
def nap(self):
nap_start_min = timezone.datetime.strptime(
settings.BABY_BUDDY['NAP_START_MIN'], '%H:%M').time()
nap_start_max = timezone.datetime.strptime(
settings.BABY_BUDDY['NAP_START_MAX'], '%H:%M').time()
local_start_time = timezone.localtime(self.start).time()
return nap_start_min <= local_start_time <= nap_start_max
def save(self, *args, **kwargs):
if self.start and self.end:
self.duration = self.end - self.start
self.napping = self.nap
super(Sleep, self).save(*args, **kwargs)
def clean(self):
validate_time(self.start, 'start')
validate_time(self.end, 'end')
validate_duration(self)
validate_unique_period(Sleep.objects.filter(child=self.child), self)
class Temperature(models.Model):
model_name = 'temperature'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='temperature',
verbose_name=_('Child')
)
temperature = models.FloatField(
blank=False,
null=False,
verbose_name=_('Temperature')
)
time = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('Time')
)
notes = models.TextField(blank=True, null=True, verbose_name=_('Notes'))
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-time']
verbose_name = _('Temperature')
verbose_name_plural = _('Temperature')
def __str__(self):
return str(_('Temperature'))
def clean(self):
validate_time(self.time, 'time')
class Timer(models.Model):
model_name = 'timer'
child = models.ForeignKey(
'Child',
blank=True,
null=True,
on_delete=models.CASCADE,
related_name='timers',
verbose_name=_('Child')
)
name = models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name=_('Name')
)
start = models.DateTimeField(
default=timezone.now,
blank=False,
verbose_name=_('Start time')
)
end = models.DateTimeField(
blank=True,
editable=False,
null=True,
verbose_name=_('End time')
)
duration = models.DurationField(
editable=False,
null=True,
verbose_name=_('Duration')
)
active = models.BooleanField(
default=True,
editable=False,
verbose_name=_('Active')
)
user = models.ForeignKey(
'auth.User',
on_delete=models.CASCADE,
related_name='timers',
verbose_name=_('User')
)
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-active', '-start', '-end']
verbose_name = _('Timer')
verbose_name_plural = _('Timers')
def __str__(self):
return self.name or str(format_lazy(_('Timer #{id}'), id=self.id))
@property
def title_with_child(self):
""" Get Timer title with child name in parenthesis. """
title = str(self)
# Only actually add the name if there is more than one Child instance.
if title and self.child and Child.count() > 1:
title = format_lazy('{title} ({child})', title=title,
child=self.child)
return title
@property
def user_username(self):
""" Get Timer user's name with a preference for the full name. """
if self.user.get_full_name():
return self.user.get_full_name()
return self.user.get_username()
@classmethod
def from_db(cls, db, field_names, values):
instance = super(Timer, cls).from_db(db, field_names, values)
if not instance.duration:
instance.duration = timezone.now() - instance.start
return instance
def restart(self):
"""Restart the timer."""
self.start = timezone.now()
self.end = None
self.duration = None
self.active = True
self.save()
def stop(self, end=None):
"""Stop the timer."""
if not end:
end = timezone.now()
self.end = end
self.save()
def save(self, *args, **kwargs):
self.active = self.end is None
self.name = self.name or None
if self.start and self.end:
self.duration = self.end - self.start
else:
self.duration = None
super(Timer, self).save(*args, **kwargs)
def clean(self):
validate_time(self.start, 'start')
if self.end:
validate_time(self.end, 'end')
validate_duration(self)
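# Illustrative-only sketch (not used by the app): a start/stop round trip for
# the Timer model above; it assumes a saved auth.User instance and a migrated
# database are available.
def _timer_round_trip_example(user):
    timer = Timer.objects.create(user=user, name='example timer')
    timer.stop()
    # save() fills in duration once stop() has set `end`.
    return timer.duration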
class TummyTime(models.Model):
model_name = 'tummytime'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='tummy_time',
verbose_name=_('Child')
)
start = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('Start time')
)
end = models.DateTimeField(
blank=False,
null=False,
verbose_name=_('End time')
)
duration = models.DurationField(
editable=False,
null=True,
verbose_name=_('Duration')
)
milestone = models.CharField(
blank=True,
max_length=255,
verbose_name=_('Milestone')
)
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-start']
verbose_name = _('Tummy Time')
verbose_name_plural = _('Tummy Time')
def __str__(self):
return str(_('Tummy Time'))
def save(self, *args, **kwargs):
if self.start and self.end:
self.duration = self.end - self.start
super(TummyTime, self).save(*args, **kwargs)
def clean(self):
validate_time(self.start, 'start')
validate_time(self.end, 'end')
validate_duration(self)
validate_unique_period(
TummyTime.objects.filter(child=self.child), self)
class Weight(models.Model):
model_name = 'weight'
child = models.ForeignKey(
'Child',
on_delete=models.CASCADE,
related_name='weight',
verbose_name=_('Child')
)
weight = models.FloatField(
blank=False,
null=False,
verbose_name=_('Weight')
)
date = models.DateField(
blank=False,
null=False,
verbose_name=_('Date')
)
notes = models.TextField(blank=True, null=True, verbose_name=_('Notes'))
objects = models.Manager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-date']
verbose_name = _('Weight')
verbose_name_plural = _('Weight')
def __str__(self):
return str(_('Weight'))
def clean(self):
validate_date(self.date, 'date')
|
the-stack_106_13980
|
from inspect_wikidump import init_inspect
from utils import common
import config
import json
from tqdm import tqdm
import spacy
import spacy.tokens
from sqlitedict import SqliteDict
import numpy as np
# We only need documents.
nlp = spacy.load('en')
nlp.remove_pipe('parser')
nlp.remove_pipe('ner')
def spacy_get_pos(tokens):
doc = spacy.tokens.doc.Doc(
nlp.vocab, words=tokens)
for name, proc in nlp.pipeline:
proc(doc)
return [token.pos_ for token in doc]
def get_sentence_tokens(texts, charoffsets):
whole_text = "".join(texts)
tokens = []
sentence_offsets = []
start_t = 0
end_t = 0
for offset_list in charoffsets:
end_t = start_t
for start, end in offset_list:
cur_token = whole_text[start:end]
if len(cur_token) > 0:
tokens.append(cur_token)
end_t += 1
sentence_offsets.append((start_t, end_t))
start_t = end_t
return tokens, sentence_offsets
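# Small illustrative check (not part of the original pipeline): 'texts' and
# 'charoffsets' below mimic the abstract-wiki format that get_sentence_tokens()
# consumes; the concrete values are made up for this example.
def _tokenization_example():
    texts = ["Hello world. ", "Second sentence."]
    charoffsets = [[(0, 5), (6, 11), (11, 12)], [(13, 19), (20, 28), (28, 29)]]
    tokens, offsets = get_sentence_tokens(texts, charoffsets)
    # tokens  -> ['Hello', 'world', '.', 'Second', 'sentence', '.']
    # offsets -> [(0, 3), (3, 6)]
    # spacy_get_pos(tokens) would then return one POS tag per token.
    return tokens, offsets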
def iterative_abs(debug_num=None):
total_doc_num = init_inspect.TOTAL_NUM_DOC if debug_num is None else debug_num
cur_count = 0
with open(config.ABS_WIKI_FILE, 'rb') as abs_file:
for line in tqdm(abs_file, total=total_doc_num):
item = json.loads(line)
# print(item.keys())
# print()
tokens, sent_offset = get_sentence_tokens(item['text'], item['charoffset'])
poss = spacy_get_pos(tokens)
assert len(tokens) == len(poss)
print(tokens)
print(sent_offset)
# print(poss)
def iterative_abs_save_info(debug_num=None):
total_doc_num = init_inspect.TOTAL_NUM_DOC if debug_num is None else debug_num
cur_count = 0
with open(config.ABS_WIKI_FILE, 'rb') as abs_file:
with SqliteDict(str(config.ABS_PROCESS_FOR_RINDEX_DB), encode=json.dumps, decode=json.loads) as abs_rindex_db:
for line in tqdm(abs_file, total=total_doc_num):
item = json.loads(line)
# print(item.keys())
# print()
if item['title'] in abs_rindex_db:
continue
tokens, sent_offset = get_sentence_tokens(item['text'], item['charoffset'])
poss = spacy_get_pos(tokens)
assert len(tokens) == len(poss)
# print(tokens)
# print(sent_offset)
abs_rindex_db[item['title']] = {
'tokens': tokens,
'poss': poss,
'sentence_offset': sent_offset
}
cur_count += 1
if cur_count % 5000 == 0:
abs_rindex_db.commit()
abs_rindex_db.commit()
abs_rindex_db.close()
def iterative_abs_save_random_batching(batch_size=10000):
total_doc_num = init_inspect.TOTAL_NUM_DOC
with open(config.ABS_WIKI_FILE, 'rb') as abs_file:
lines = []
for line in tqdm(abs_file, total=total_doc_num):
lines.append(line)
# if len(lines) == 100000:
# break
random_per = range(len(lines))
# random_per = np.random.permutation(len(lines))
# random.shuffle(lines)
# existing_title_set = set()
batch_list = []
with SqliteDict(str(config.ABS_PROCESS_FOR_RINDEX_DB), encode=json.dumps, decode=json.loads) as abs_rindex_db:
for index in tqdm(random_per):
item = json.loads(lines[index])
# print(item.keys())
# print()
if item['title'] in abs_rindex_db:
continue
tokens, sent_offset = get_sentence_tokens(item['text'], item['charoffset'])
poss = spacy_get_pos(tokens)
assert len(tokens) == len(poss)
# print(tokens)
# print(sent_offset)
rindex_item = {
'tokens': tokens,
'poss': poss,
'sentence_offset': sent_offset
}
batch_list.append((item['title'], rindex_item))
if len(batch_list) == batch_size:
for title, rindex_item in batch_list:
abs_rindex_db[title] = rindex_item
abs_rindex_db.commit()
batch_list = []
# Commit last one
for title, rindex_item in batch_list:
abs_rindex_db[title] = rindex_item
abs_rindex_db.commit()
abs_rindex_db.close()
if __name__ == '__main__':
# iterative_abs()
# iterative_abs_save_info()
iterative_abs_save_random_batching()
|
the-stack_106_13986
|
#!/usr/bin/python2.4
# encoding: utf-8
"""
help.py
Help functionality for the ddG database.
Created by Shane O'Connor 2012.
Copyright (c) 2012 __UCSF__. All rights reserved.
"""
import sys
import re
from string import join
from kddg.api.dbi import StdCursor, ddGDatabase # FieldNames
from klab import colortext
import ddglib
from ddglib.filter import *
import inspect
#dbfields = FieldNames()
def _get_ResultSetFilter_data():
s_module = "ddglib.ddgfilters"
#clsmembers = inspect.getmembers(sys.modules[s_module], lambda member: member.__module__ == s_module and inspect.isclass)
#clsmembers = inspect.getmembers(sys.modules[s_module], lambda member: member.inspect.isclass(member))
m_filters = []
m_resultsets = []
d_filters = {}
s_module = "ddglib.ddgfilters"
for m in inspect.getmembers(sys.modules[s_module]):
o = m[1]
if inspect.isclass(o) and o.__module__ == s_module:
classnm = m[0]
if classnm.find("Filter") != -1:
d = {"name" : classnm, "class" : o}
m_filters.append(d)
d_filters[classnm] = d
elif classnm.find("ResultSet") != -1:
e_Filter = "%sFilter" % classnm[:classnm.find("ResultSet")]
e_Filter = o.allowed_filters
#"%sFilter" % classnm[:classnm.find("ResultSet")]
m_resultsets.append({"name" : classnm, "class" : o, "filter" : e_Filter})
else:
colortext.error("Unknown class '%s' found." % classnm)
return m_filters, m_resultsets, d_filters
def _print_lines(helplines):
for linepair in helplines:
colortext.printf(linepair[0], color=linepair[1])
def ShowDatabaseStructure():
'''Extracts the database structure from the MySQL database and prints it out.'''
help = []
ddGdb = ddGDatabase()
procregex = re.compile(".*?PROCEDURE `.*?`[(](.*?)[)] BEGIN.*")
help.append(("\n* Database structure *", "white"))
help.append(("The tables of the database are as follows (orange = primary key, blue = foreign key):\n", "silver"))
tablenames = sorted([r[0] for r in (ddGdb.execute_select_StdCursor("SHOW TABLES"))])
for tbl in tablenames:
help.append((tbl, "green"))
fieldnames = ddGdb.execute("SHOW COLUMNS FROM %s" % tbl)
for fld in fieldnames:
fname = fld["Field"]
ftype = fld["Type"]
fdefault = fld["Default"]
fextra = fld["Extra"]
fkey = fld["Key"]
fCanBeNull = fld["Null"]
padding = " " * (32 - len(fname))
str = "\t%s%s%s" % (fname, padding, ftype)
if fkey == "PRI":
help.append((str, "orange"))
elif fkey == "MUL":
help.append((str, "lightblue"))
else:
help.append((str, "yellow"))
help.append(("\n* Database stored procedures *", "white"))
help.append(("The stored procedures defined in the database are as follows:\n", "silver"))
sprocs = sorted([r[0] for r in ddGdb.execute_select_StdCursor("SELECT ROUTINE_NAME FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_SCHEMA = 'ddG' AND ROUTINE_TYPE = 'PROCEDURE'")])
for sproc in sprocs:
defn = ddGdb.execute("SHOW CREATE PROCEDURE %s" % sproc)[0]
mtchs = procregex.match((defn["Create Procedure"] or "").replace("\n", " "))
if mtchs:
help.append(("\t%s( %s )" % (defn["Procedure"], mtchs.group(1)), "green"))
else:
help.append(("\t%s" % defn["Procedure"], "green"))
_print_lines(help)
def ShowResultSet():
'''Explains how to extract sets of data from the database.'''
help = []
m_filters, m_resultsets, d_filters = _get_ResultSetFilter_data()
help.append(('''
* ResultSets *''', "white"))
help.append(('''
ResultSets are used to store data retrieved from the database. They are the result of an SQL query or
stored procedure call. To create a ResultSet, use the following format:''', 'silver'))
help.append((''' SomeResultSet(db, <string:SQL>, <tuple:parameters>, <list:AdditionalIDs>)''', "lightblue"))
    help.append(('''The argument db is a database connection retrieved from calling kddg.api.dbi.ddGDatabase().
The SQL string and associated parameters are optional. If they are not supplied and neither is AdditionalIDs
then all records will be returned. If only AdditionalIDs is supplied then only records with those primary keys
in the list are returned. If an SQL string is supplied then that query will be run instead with any associated
parameters. If AdditionalIDs is also supplied then the associated records are included in the result set.
Examples:
1. Return all structures:''', "silver"))
help.append((''' sr = StructureResultSet(ddGdb)''', "lightblue"))
help.append(('''2. Get four specific structures:''', "silver"))
help.append((''' sr = StructureResultSet(ddGdb, AdditionalIDs = ['2BQC', '1LAW', '1LHH', '1LHI'])''', "lightblue"))
help.append(('''3. Get all predictions with IDs >= 12395 from the 'testrun' prediction set:''', "silver"))
help.append((''' pr = PredictionResultSet(ddGdb, SQL = "WHERE PredictionSet=%s AND ID>=%s", parameters = ("testrun", 12595))''', "lightblue"))
help.append(('''
Note in the last example that the arguments to the SQL string are placed in the parameters tuple and %s is
used for both strings and numbers. This is the recommended approach as it avoids any manual mistakes made
from badly formatting the SQL strings.
An alternative method of narrowing down results is by using Filters. Each ResultSet has associated Filters that
may be applied to return a filtered set of results. This approach is no more powerful than using SQL (and the
implementation is typically slower) but it is hopefully easier to use. It also abstracts away the database structure
so will hopefully survive any changes made to the database design. Filters are explained below.''', "silver"))
help.append(('''
The following ResultSet classes and associated filters are available:\n''', "silver"))
for rs in sorted(m_resultsets):
help.append(("\t%s" % rs["name"], "green"))
if rs["filter"]:
avail_f = [d_filters[rs_f.__name__]["name"] for rs_f in rs["filter"] if d_filters.get(rs_f.__name__)]
help.append(("\t\t%s" % join(avail_f, ", "), "orange"))
_print_lines(help)
def ShowFilter():
'''Explains how to filter sets of data extracted from the database (ResultSets).'''
help = []
m_filters, m_resultsets, d_filters = _get_ResultSetFilter_data()
help.append(('''
* Filters *''', "white"))
help.append(('''
Database records can be retrieved by SQL queries or stored procedures but also through the use of filters.
---
sr = StructureResultSet(ddGdb, AdditionalIDs = ['2BQC', '1LAW', '1LHH', '1LHI'])
sr.addFilter(StructureFilter.TotalBFactors(0,16) | StructureFilter.WithNullResolution(True))
results = sr.getFilteredResults()
We run an SQL query if the SQL parameter has been specified or if AdditionalIDs is empty.
# If AdditionalIDs is not empty and the SQL is blank then we do NOT run the SQL query and
# instead use AdditionalIDs as the record keys.
Note that it is less efficient to use union filters than to set the properties using the set methods. However,
speed may not be an issue and they are quicker to write.
    Filters are used to narrow down the set of records returned by a ResultSet.
---
The following Filters are available:\n''', "silver"))
for filter in sorted(m_filters):
help.append(("%s" % filter["name"], "green"))
c = filter["class"]
class_members = c.getMembers()
help.append(("\tAttributes", "orange"))
for x in sorted(class_members["attributes"]):
help.append(("\t\t%s" % x[0], "silver"))
help.append(("\tMethods", "orange"))
for x in sorted(class_members["methods"]):
arglist = join(inspect.getargspec(x[1])[0][1:], ", ")
help.append(("\t\t%s(%s)" % (x[0], arglist), "silver"))
help.append(("\tFunctions", "orange"))
for x in sorted(class_members["functions"]):
arglist = join(inspect.getargspec(x[1])[0], ", ")
help.append(("\t\t%s(%s)" % (x[0], arglist), "silver"))
_print_lines(help)
def help():
'''This help function.'''
help = []
help.append(("\nThe following help functions are available. To use them, import help and call the functions by name e.g.", "white"))
help.append((" ddglib.help.help()", "lightblue"))
s_module = sys.modules[__name__]
hfns = []
for m in inspect.getmembers(s_module):
o = m[1]
if inspect.isroutine(o) and o.__module__ == "ddglib.help":
if m[0][0] != "_":
hfns.append((m[0], m[1].__doc__))
if hfns:
help.append((" ddglib.help.%s()" % sorted(hfns)[0][0], "lightblue"))
help.append(("", "silver"))
for fn in sorted(hfns):
help.append(("\t%s" % fn[0], "green"))
help.append(("\t %s" % fn[1], "silver"))
_print_lines(help)
|
the-stack_106_13987
|
import itertools
import pathlib
from pathlib import Path
from typing import Union
import gdspy
from gdsfactory.component import Component
from gdsfactory.import_gds import import_gds
COUNTER = itertools.count()
def xor_polygons(A: Component, B: Component, hash_geometry: bool = True):
"""Given two devices A and B, performs a layer-by-layer XOR diff between
A and B, and returns polygons representing the differences between A and B.
Adapted from lytest/kdb_xor.py
"""
# first do a geometry hash to vastly speed up if they are equal
if hash_geometry and (A.hash_geometry() == B.hash_geometry()):
return Component()
D = Component()
A_polys = A.get_polygons(by_spec=True)
B_polys = B.get_polygons(by_spec=True)
A_layers = A_polys.keys()
B_layers = B_polys.keys()
all_layers = set()
all_layers.update(A_layers)
all_layers.update(B_layers)
for layer in all_layers:
if (layer in A_layers) and (layer in B_layers):
p = gdspy.fast_boolean(
A_polys[layer],
B_polys[layer],
operation="xor",
precision=0.001,
max_points=4000,
layer=layer[0],
datatype=layer[1],
)
elif layer in A_layers:
p = A_polys[layer]
elif layer in B_layers:
p = B_polys[layer]
if p is not None:
D.add_polygon(p, layer=layer)
return D
def gdsdiff(
component1: Union[Path, Component, str],
component2: Union[Path, Component, str],
name: str = "TOP",
xor: bool = True,
) -> Component:
"""Compare two Components.
Args:
component1: Component or path to gds file
component2: Component or path to gds file
name: name of the top cell
xor: makes boolean operation
Returns:
Component with both cells (xor, common and diffs)
"""
if isinstance(component1, pathlib.Path):
component1 = str(component1)
if isinstance(component2, pathlib.Path):
component2 = str(component2)
if isinstance(component1, str):
component1 = import_gds(component1, flatten=True)
if isinstance(component2, str):
component2 = import_gds(component2, flatten=True)
top = Component(name=f"{name}_diffs")
if component1.name.startswith("Unnamed"):
component1.name = f"{name}_old"
if component2.name.startswith("Unnamed"):
component2.name = f"{name}_new"
ref1 = top << component1
ref2 = top << component2
ref1.xmin = 0
ref1.ymin = 0
ref2.xmin = 0
ref2.ymin = 0
if xor:
diff = xor_polygons(ref1, ref2, hash_geometry=False)
diff.name = f"{name}_xor"
top.add_ref(diff)
return top
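# Hypothetical programmatic use (the paths and output name are placeholders):
# compare two GDS files and write the combined old/new/xor layout to disk
# instead of streaming it to KLayout via show().
def write_diff_gds(gds_old: str, gds_new: str, out: str = "diff.gds") -> str:
    diff_component = gdsdiff(gds_old, gds_new, name="TOP", xor=True)
    diff_component.write_gds(out)
    return out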
if __name__ == "__main__":
import sys
if len(sys.argv) != 3:
print("Usage: gdsdiff <mask_v1.gds> <mask_v2.gds>")
print("Note that you need to have KLayout opened with klive running")
sys.exit()
c = gdsdiff(sys.argv[1], sys.argv[2])
c.show()
|
the-stack_106_13990
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: viz.py
# Credit: zxytim
import numpy as np
import os
import sys
import io
import cv2
from .fs import mkdir_p
from .argtools import shape2d
try:
import matplotlib.pyplot as plt
except ImportError:
pass
__all__ = ['pyplot2img', 'interactive_imshow',
'stack_patches', 'gen_stack_patches',
'dump_dataflow_images', 'intensity_to_rgb']
def pyplot2img(plt):
""" Convert a pyplot instance to image """
buf = io.BytesIO()
plt.axis('off')
plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
buf.seek(0)
rawbuf = np.frombuffer(buf.getvalue(), dtype='uint8')
im = cv2.imdecode(rawbuf, cv2.IMREAD_COLOR)
buf.close()
return im
def interactive_imshow(img, lclick_cb=None, rclick_cb=None, **kwargs):
"""
Args:
img (np.ndarray): an image (expect BGR) to show.
lclick_cb, rclick_cb: a callback ``func(img, x, y)`` for left/right click event.
kwargs: can be {key_cb_a: callback_img, key_cb_b: callback_img}, to
specify a callback ``func(img)`` for keypress.
Some existing keypress event handler:
* q: destroy the current window
* x: execute ``sys.exit()``
* s: save image to "out.png"
"""
name = 'tensorpack_viz_window'
cv2.imshow(name, img)
def mouse_cb(event, x, y, *args):
if event == cv2.EVENT_LBUTTONUP and lclick_cb is not None:
lclick_cb(img, x, y)
elif event == cv2.EVENT_RBUTTONUP and rclick_cb is not None:
rclick_cb(img, x, y)
cv2.setMouseCallback(name, mouse_cb)
key = chr(cv2.waitKey(-1) & 0xff)
cb_name = 'key_cb_' + key
if cb_name in kwargs:
kwargs[cb_name](img)
elif key == 'q':
cv2.destroyWindow(name)
elif key == 'x':
sys.exit()
elif key == 's':
cv2.imwrite('out.png', img)
def _preproecss_patch_list(plist):
plist = np.asarray(plist)
if plist.ndim == 3:
plist = plist[:, :, :, np.newaxis]
assert plist.ndim == 4 and plist.shape[3] in [1, 3], plist.shape
return plist
def _pad_patch_list(plist, bgcolor):
if isinstance(bgcolor, int):
bgcolor = (bgcolor, bgcolor, bgcolor)
def _pad_channel(plist):
ret = []
for p in plist:
if len(p.shape) == 2:
p = p[:, :, np.newaxis]
if p.shape[2] == 1:
p = np.repeat(p, 3, 2)
ret.append(p)
return ret
plist = _pad_channel(plist)
shapes = [x.shape for x in plist]
ph = max([s[0] for s in shapes])
pw = max([s[1] for s in shapes])
ret = np.zeros((len(plist), ph, pw, 3), dtype=plist[0].dtype)
ret[:, :, :] = bgcolor
for idx, p in enumerate(plist):
s = p.shape
        sh = (ph - s[0]) // 2
        sw = (pw - s[1]) // 2
ret[idx, sh:sh + s[0], sw:sw + s[1], :] = p
return ret
class Canvas(object):
def __init__(self, ph, pw,
nr_row, nr_col,
channel, border, bgcolor):
self.ph = ph
self.pw = pw
self.nr_row = nr_row
self.nr_col = nr_col
if border is None:
border = int(0.1 * min(ph, pw))
self.border = border
if isinstance(bgcolor, int):
bgchannel = 1
else:
bgchannel = 3
self.bgcolor = bgcolor
self.channel = max(channel, bgchannel)
self.canvas = np.zeros((nr_row * (ph + border) - border,
nr_col * (pw + border) - border,
self.channel), dtype='uint8')
def draw_patches(self, plist):
assert self.nr_row * self.nr_col == len(plist), \
"{}*{} != {}".format(self.nr_row, self.nr_col, len(plist))
if self.channel == 3 and plist.shape[3] == 1:
plist = np.repeat(plist, 3, axis=3)
cur_row, cur_col = 0, 0
if self.channel == 1:
self.canvas.fill(self.bgcolor)
else:
self.canvas[:, :, :] = self.bgcolor
for patch in plist:
r0 = cur_row * (self.ph + self.border)
c0 = cur_col * (self.pw + self.border)
self.canvas[r0:r0 + self.ph, c0:c0 + self.pw] = patch
cur_col += 1
if cur_col == self.nr_col:
cur_col = 0
cur_row += 1
def get_patchid_from_coord(self, x, y):
x = x // (self.pw + self.border)
y = y // (self.pw + self.border)
idx = y * self.nr_col + x
return idx
def stack_patches(
patch_list, nr_row, nr_col, border=None,
pad=False, bgcolor=255, viz=False, lclick_cb=None):
"""
    Stack patches into a grid to produce visualizations like the following:
.. image:: https://github.com/ppwwyyxx/tensorpack/raw/master/examples/GAN/demo/CelebA-samples.jpg
Args:
patch_list(list[ndarray] or ndarray): NHW or NHWC images in [0,255].
nr_row(int), nr_col(int): rows and cols of the grid.
``nr_col * nr_row`` must be equal to ``len(patch_list)``.
border(int): border length between images.
Defaults to ``0.1 * min(patch_width, patch_height)``.
pad (boolean): when `patch_list` is a list, pad all patches to the maximum height and width.
This option allows stacking patches of different shapes together.
bgcolor(int or 3-tuple): background color in [0, 255]. Either an int
or a BGR tuple.
viz(bool): whether to use :func:`interactive_imshow` to visualize the results.
lclick_cb: A callback function ``f(patch, patch index in patch_list)``
to get called when a patch get clicked in imshow.
Returns:
np.ndarray: the stacked image.
"""
if pad:
        patch_list = _pad_patch_list(patch_list, bgcolor)
patch_list = _preproecss_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
canvas = Canvas(ph, pw, nr_row, nr_col,
patch_list.shape[-1], border, bgcolor)
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
canvas.draw_patches(patch_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
return canvas.canvas
def gen_stack_patches(patch_list,
nr_row=None, nr_col=None, border=None,
max_width=1000, max_height=1000,
bgcolor=255, viz=False, lclick_cb=None):
"""
Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
"""
# setup parameters
patch_list = _preproecss_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
if border is None:
border = int(0.1 * min(ph, pw))
if nr_row is None:
nr_row = int(max_height / (ph + border))
if nr_col is None:
nr_col = int(max_width / (pw + border))
canvas = Canvas(ph, pw, nr_row, nr_col, patch_list.shape[-1], border, bgcolor)
nr_patch = nr_row * nr_col
start = 0
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
idx = idx + start
if idx < end:
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
while True:
end = start + nr_patch
cur_list = patch_list[start:end]
if not len(cur_list):
return
canvas.draw_patches(cur_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
yield canvas.canvas
start = end
def dump_dataflow_images(df, index=0, batched=True,
number=1000, output_dir=None,
scale=1, resize=None, viz=None,
flipRGB=False):
"""
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not.
"""
if output_dir:
mkdir_p(output_dir)
if viz is not None:
viz = shape2d(viz)
vizsize = viz[0] * viz[1]
if resize is not None:
resize = tuple(shape2d(resize))
vizlist = []
df.reset_state()
cnt = 0
while True:
for dp in df.get_data():
if not batched:
imgbatch = [dp[index]]
else:
imgbatch = dp[index]
for img in imgbatch:
cnt += 1
if cnt == number:
return
if scale != 1:
img = img * scale
if resize is not None:
img = cv2.resize(img, resize)
if flipRGB:
img = img[:, :, ::-1]
if output_dir:
fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
cv2.imwrite(fname, img)
if viz is not None:
vizlist.append(img)
if viz is not None and len(vizlist) >= vizsize:
stack_patches(
vizlist[:vizsize],
nr_row=viz[0], nr_col=viz[1], viz=True)
vizlist = vizlist[vizsize:]
def intensity_to_rgb(intensity, cmap='cubehelix', normalize=False):
"""
Convert a 1-channel matrix of intensities to an RGB image employing a colormap.
This function requires matplotlib. See `matplotlib colormaps
<http://matplotlib.org/examples/color/colormaps_reference.html>`_ for a
list of available colormap.
Args:
intensity (np.ndarray): array of intensities such as saliency.
cmap (str): name of the colormap to use.
normalize (bool): if True, will normalize the intensity so that it has
minimum 0 and maximum 1.
Returns:
np.ndarray: an RGB float32 image in range [0, 255], a colored heatmap.
"""
assert intensity.ndim == 2, intensity.shape
intensity = intensity.astype("float")
if normalize:
intensity -= intensity.min()
intensity /= intensity.max()
cmap = plt.get_cmap(cmap)
intensity = cmap(intensity)[..., :3]
return intensity.astype('float32') * 255.0
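# Hypothetical usage sketch for intensity_to_rgb(): colorize a random
# "saliency" map and write it next to the script; the file name is made up.
def _intensity_to_rgb_demo():
    fake_saliency = np.random.rand(64, 64).astype('float32')
    heatmap = intensity_to_rgb(fake_saliency, cmap='jet', normalize=True)
    # OpenCV expects BGR uint8, so flip the channel order before writing.
    cv2.imwrite('saliency_demo.png', heatmap[:, :, ::-1].astype('uint8'))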
if __name__ == '__main__':
if False:
imglist = []
for i in range(100):
fname = "{:03d}.png".format(i)
imglist.append(cv2.imread(fname))
for idx, patch in enumerate(gen_stack_patches(
imglist, max_width=500, max_height=200)):
of = "patch{:02d}.png".format(idx)
cv2.imwrite(of, patch)
else:
imglist = []
img = cv2.imread('out.png')
img2 = cv2.resize(img, (300, 300))
viz = stack_patches([img, img2], 1, 2, pad=True, viz=True)
|
the-stack_106_13991
|
#!/usr/bin/env python
# coding: utf-8
from owlready2 import *
import csv
# import and parse the OWL ontology
onto = get_ontology("working_copy/eu-cm-ontology.owl").load()
# get classes, object properties and data properties
classes = list(onto.classes())
object_properties = list(onto.object_properties())
data_properties = list(onto.data_properties())
# gather classes and properties into one set of entities
entities = []
entities.extend(classes)
entities.extend(object_properties)
entities.extend(data_properties)
# write flat list of ontology entities to file - with prefix abbreviation "cbcm:"
with open("working_copy/cbcm_ontology_terms_flatlist.csv", 'w', newline='') as flatlistfile:
writer = csv.writer(flatlistfile)
for entity in entities:
entityNameParts = str(entity).split(".")
entityName = "cbcm:" + entityNameParts[1]
writer.writerow([entityName])
# write mapping file of ontology entities to human-readable labels and human-readable definitions (candidates for tooltip texts)
with open("working_copy/cbcm_ontology_terms_tooltip_texts.csv", 'w', newline='') as tooltipsfile:
writer = csv.writer(tooltipsfile, quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(["ontology_term","ontology_term_label","ontology_term_definition"])
for entity in entities:
entityNameParts = str(entity).split(".")
entityName = "cbcm:" + entityNameParts[1]
entityComment = "None"
entityLabel = "None"
if (len(entity.comment) > 0):
entityComment = entity.comment[0]
if (len(entity.label) > 0):
entityLabel = entity.label[0]
writer.writerow([entityName,entityLabel,entityComment])
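# Optional sanity check (not in the original script): read the tooltip mapping
# back with the standard csv module and print the first few rows; this assumes
# the CSV written above exists.
with open("working_copy/cbcm_ontology_terms_tooltip_texts.csv", newline='') as checkfile:
    reader = csv.DictReader(checkfile)
    for i, row in enumerate(reader):
        print(row["ontology_term"], "->", row["ontology_term_label"])
        if i >= 4:
            break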
|
the-stack_106_13992
|
# coding=utf-8
from atlassian import Jira
jira = Jira(url="http://localhost:8080", username="admin", password="admin")
def get_all_users(group, include_inactive=True):
"""
Get all users for group. If there more, than 50 users in group:
go through the pages and append other users to the list
:param group:
:param include_inactive:
:return:
"""
start = 0
users = jira.get_all_users_from_group(
group, include_inactive_users=include_inactive, start=start
)
processed_data = {
"group_name": group,
"total": users["total"],
"users": [
{"name": user["name"], "active": user["active"]} for user in users["values"]
],
}
while "nextPage" in users:
start += 50
users = jira.get_all_users_from_group(
group, include_inactive_users=include_inactive, start=start
)
user_list = [
{"name": user["name"], "active": user["active"]} for user in users["values"]
]
processed_data["users"] = processed_data["users"] + user_list
return processed_data
def sort_users_in_group(group):
"""
    Take a group, sort its users by name, and return the group with sorted users
"""
group["users"] = [
sorted_group for sorted_group in sorted(group["users"], key=lambda k: k["name"])
]
return group
def get_groups_data():
"""
Get all groups, get all users for each group and sort groups by users
:return:
"""
groups = [group["name"] for group in jira.get_groups(limit=200)["groups"]]
groups_and_users = [get_all_users(group) for group in groups]
groups_and_users = [sort_users_in_group(group) for group in groups_and_users]
return groups_and_users
def get_inactive_users(groups):
"""
Take group list and return groups only with inactive users
:param groups:
:return:
"""
inactive_users_list = []
for group in groups:
inactive_users = {
"group_name": group["group_name"],
"users": [
{"name": user["name"], "active": user["active"]}
for user in group["users"]
if not user["active"]
],
}
inactive_users_list.append(inactive_users)
return inactive_users_list
def exclude_inactive_users(groups):
"""
Excluding inactive users from groups.
:param groups:
:return:
"""
for group in groups:
for user in group["users"]:
print(f'Trying to delete {user["name"]} from group {group["group_name"]}')
jira.remove_user_from_group(user["name"], group["group_name"])
return True
def filter_groups_by_members(groups, quantity=1):
"""
    Take a groups list and return groups with fewer than `quantity` members (empty groups by default)
:param groups:
:param quantity:
:return:
"""
return [x for x in groups if int(x["total"]) < quantity]
def find_group(groups, group_name):
"""
Take groups list and find group by the group name
:param groups:
:param group_name:
:return:
"""
for group in groups:
if group["group_name"] == group_name:
return group
else:
return f"Group {group_name} not in list"
|
the-stack_106_13994
|
from __future__ import absolute_import
from __future__ import division
import socket
import socks
from pwnlib.log import getLogger
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
log = getLogger(__name__)
class remote(sock):
r"""Creates a TCP or UDP-connection to a remote host. It supports
both IPv4 and IPv6.
The returned object supports all the methods from
:class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
Arguments:
host(str): The host to connect to.
port(int): The port to connect to.
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
timeout: A positive number, None or the string "default".
ssl(bool): Wrap the socket with SSL
ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.
sni: Set 'server_hostname' in ssl_args based on the host parameter.
sock(socket.socket): Socket to inherit, rather than connecting
ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.
Examples:
>>> r = remote('google.com', 443, ssl=True)
>>> r.send(b'GET /\r\n\r\n')
>>> r.recvn(4)
b'HTTP'
If a connection cannot be made, an exception is raised.
>>> r = remote('127.0.0.1', 1)
Traceback (most recent call last):
...
PwnlibException: Could not connect to 127.0.0.1 on port 1
You can also use :meth:`.remote.fromsocket` to wrap an existing socket.
>>> import socket
>>> s = socket.socket()
>>> s.connect(('google.com', 80))
>>> s.send(b'GET /' + b'\r\n'*2)
9
>>> r = remote.fromsocket(s)
>>> r.recvn(4)
b'HTTP'
"""
def __init__(self, host, port,
fam = "any", typ = "tcp",
ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,
*args, **kwargs):
super(remote, self).__init__(*args, **kwargs)
self.rport = port
self.rhost = host
if sock:
self.family = sock.family
self.type = sock.type
self.proto = sock.proto
self.sock = sock
else:
typ = self._get_type(typ)
fam = self._get_family(fam)
try:
self.sock = self._connect(fam, typ)
except socket.gaierror as e:
if e.errno != socket.EAI_NONAME:
raise
self.error('Could not resolve hostname: %r', host)
if self.sock:
self.settimeout(self.timeout)
self.lhost, self.lport = self.sock.getsockname()[:2]
if ssl:
# Deferred import to save startup time
import ssl as _ssl
ssl_args = ssl_args or {}
ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)
if isinstance(sni, str):
ssl_args["server_hostname"] = sni
elif sni:
ssl_args["server_hostname"] = host
self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)
def _connect(self, fam, typ):
sock = None
timeout = self.timeout
with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:
for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):
self.family, self.type, self.proto, _canonname, sockaddr = res
if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
continue
h.status("Trying %s", sockaddr[0])
sock = socket.socket(self.family, self.type, self.proto)
if timeout is not None and timeout <= 0:
sock.setblocking(0)
else:
sock.setblocking(1)
sock.settimeout(timeout)
try:
sock.connect(sockaddr)
return sock
except socks.ProxyError:
raise
except socket.error:
pass
self.error("Could not connect to %s on port %s", self.rhost, self.rport)
@classmethod
def fromsocket(cls, socket):
"""
Helper method to wrap a standard python socket.socket with the
tube APIs.
Arguments:
socket: Instance of socket.socket
Returns:
Instance of pwnlib.tubes.remote.remote.
"""
s = socket
host, port = s.getpeername()
return remote(host, port, fam=s.family, typ=s.type, sock=s)
class tcp(remote):
__doc__ = remote.__doc__
def __init__(self, host, port, *a, **kw):
return super(tcp, self).__init__(host, port, typ="tcp", *a, **kw)
class udp(remote):
__doc__ = remote.__doc__
def __init__(self, host, port, *a, **kw):
return super(udp, self).__init__(host, port, typ="udp", *a, **kw)
class connect(remote):
__doc__ = remote.__doc__
|
the-stack_106_14000
|
#!/usr/bin/env python3
""" Executable for game of life """
import game_of_life
SIZE_X, SIZE_Y = 80, 30
def main():
""" Main function """
game = game_of_life.CursesGame(size_x=SIZE_X, size_y=SIZE_Y,
max_size=True)
game.print_world()
while True:
user_command = game.get_command_from_user()
game.handle_command(user_command)
game.print_world()
if __name__ == "__main__":
main()
|
the-stack_106_14002
|
#!/usr/bin/env python3
from threading import Timer
from datetime import datetime
import subprocess
import os
import sys
sys.path.append("../lib/")
import json
import json_parser
import pos
import pos_util
import cli
import pos_constant
import time
POS_ROOT = '../../../'
ARRAYNAME = "POSArray"
#SECONDS FOR A MINUTE
#SET 60 FOR REALTIME TESTING
SECONDS_PER_MINUTE = 60
#MINUTES FOR AN HOUR
#SET 60 FOR REALTIME TESTING
MINUTES_PER_HOUR = 60
TESTTIME_IN_HOUR = 72
ELAPSED_MIN = 0
ELAPSED_HOUR = 0
RC = True
#for PM
# DATA = "unvme-ns-0,unvme-ns-1,unvme-ns-2,unvme-ns-3,unvme-ns-4"
# SPARE_1 = "unvme-ns-5"
# SPARE_2 = "unvme-ns-6"
# SPARE_3 = "unvme-ns-7"
# DETACH_1 = "unvme-ns-0"
# DETACH_2 = "unvme-ns-1"
#VOL_SIZE = 2 * pos_constant.SIZE_1GB
DEV_1 = "unvme-ns-0"
DEV_2 = "unvme-ns-1"
DEV_3 = "unvme-ns-2"
DEV_4 = "unvme-ns-3"
DEV_1_RECYCLED = "unvme-ns-4"
DEV_2_RECYCLED = "unvme-ns-5"
DEV_3_RECYCLED = "unvme-ns-6"
DEV_4_RECYCLED = "unvme-ns-7"
VOL_SIZE = 10 * pos_constant.SIZE_1GB
#MAX VOLUME COUNT FOR A TEST
#DO NOT SET GREATER THAN 24
#In the VM environment recommends up to 12 or less
MAX_VOL_CNT = 2
VOL_NAME_PREFIX = "vol"
VOL_CNT = 0
class TestTimer():
def __init__(self, seconds, target):
self._should_continue = False
self.is_running = False
self.seconds = seconds
self.target = target
self.thread = None
def _handle_target(self):
self.is_running = True
self.target()
self.is_running = False
if (RC == False):
self.cancel()
elif (ELAPSED_HOUR == TESTTIME_IN_HOUR):
self.cancel()
else:
self._start_timer()
def _start_timer(self):
if self._should_continue: # Code could have been running when cancel was called.
self.thread = Timer(self.seconds, self._handle_target)
self.thread.start()
def start(self):
if not self._should_continue and not self.is_running:
self._should_continue = True
self._start_timer()
else:
write_log("Timer already started or running, please wait if you're restarting.")
def cancel(self):
if self.thread is not None:
self._should_continue = False # Just in case thread is running and cancel fails.
self.thread.cancel()
else:
write_log("Timer never started or failed to initialize.")
def join(self):
while self._should_continue == True:
time.sleep(1)
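# Minimal illustration of the TestTimer above (not used by the 72-hour test):
# the callback fires once per interval until cancel() is called.
def _test_timer_demo(interval_sec=1, ticks=3):
    counter = {"n": 0}
    def _tick():
        counter["n"] += 1
        print("demo tick", counter["n"])
    demo = TestTimer(interval_sec, _tick)
    demo.start()
    time.sleep(interval_sec * ticks + 1)
    demo.cancel()
    return counter["n"]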
class FIO():
def __init__(self):
self._list = {}
def start_fio(self, vol_id):
key = VOL_NAME_PREFIX + str(vol_id)
if key in self._list.keys():
write_log ("fio id: " + key + " is already running")
return False
else:
ip_addr = pos.TR_ADDR
ns_id = str(vol_id + 1)
test_name = key
file_name = "trtype=tcp adrfam=IPv4 traddr=" + ip_addr + \
" trsvcid=1158 subnqn=nqn.2019-04.pos\:subsystem1 ns= " + ns_id
remainig_min = (TESTTIME_IN_HOUR - ELAPSED_HOUR) * MINUTES_PER_HOUR - ELAPSED_MIN
runtime_sec = remainig_min * SECONDS_PER_MINUTE
write_log ("runtime_in_sec: " + str(runtime_sec))
ioengine_path = POS_ROOT + "lib/spdk/examples/nvme/fio_plugin/fio_plugin"
fio_proc = subprocess.Popen(["fio",
"--ioengine=" + ioengine_path,\
"--runtime=" + str(runtime_sec), \
"--bs=4096", \
"--iodepth=128",\
"--readwrite=write",\
"--offset=0",\
"--bs_unaligned=1",\
"--bs=4096",\
"--verify=md5",\
"--serialize_overlap=1",\
"--time_based",\
"--numjobs=1",\
"--thread=1",\
"--group_reporting=1",\
"--direct=1",\
"--name=" + test_name, \
"--filename=" + file_name]\
)
self._list[key] = fio_proc
write_log ("fio id: " + key + " has been started")
return True
def stop_fio(self, key):
if key in self._list.keys():
fio_proc = self._list[key]
if fio_proc.poll() is None:
fio_proc.kill()
fio_proc.wait()
write_log ("fio id: " + key + " has been terminated")
else:
write_log ("fio id: " + key + " already been terminated")
def dispose(self):
write_log ("fio dispose, dict len: " + str(len(self._list)))
for key in self._list:
write_log ("stop_fio target: " + key)
self.stop_fio(key)
self._list.clear()
fio_util = FIO()
def timer_tick():
global ELAPSED_MIN
ELAPSED_MIN = ELAPSED_MIN + 1
if (ELAPSED_MIN == MINUTES_PER_HOUR):
global ELAPSED_HOUR
ELAPSED_HOUR = ELAPSED_HOUR + 1
ELAPSED_MIN = 0
write_log ("")
write_log ("")
print_time()
if (ELAPSED_MIN == 0):
tick_hour()
def print_time():
curr = datetime.now()
timelog = '[' + curr.strftime("%H:%M:%S") + '] Time elapsed ' + str(ELAPSED_HOUR).zfill(2) + ":" + str(ELAPSED_MIN).zfill(2)
write_log(timelog)
def write_log(_log):
print(_log)
with open("72hour_test_log", "a") as result_file:
result_file.write(_log+"\n")
def start_pos():
write_log ("starting pos...")
ret = pos.start_pos()
if ret is False:
write_log("faild to start pos")
return False
write_log ("pos is running")
return True
def exit_pos():
write_log ("exiting pos...")
state = get_state()
if state == "NORMAL" or state == "BUSY":
ret = unmount_pos()
if ret == False:
write_log("pos unmounting failed")
return False
fio_util.dispose()
pos.exit_pos()
write_log ("pos has been terminated")
return True
def abort_pos():
write_log ("abort pos for dump...")
fio_util.dispose()
pos_util.abort_process("pos")
write_log ("pos has been terminated")
def restart_pos():
ret = exit_pos()
if ret == False:
write_log("pos restarting failed while exiting")
return False
ret = start_pos()
if ret is False:
write_log("pos restarting failed while starting")
return False
write_log("pos has been restarted")
return True
def scan_dev():
write_log ("scan_dev begin")
pos_util.pci_rescan()
time.sleep(2)
cli.scan_device()
cli.list_device()
write_log ("scan_dev done")
def create_array():
DATA = DEV_1 + "," + DEV_2 + "," + DEV_3
out = cli.create_array("uram0", DATA, "", ARRAYNAME, "")
code = json_parser.get_response_code(out)
if code == 0:
write_log ("array created successfully")
return True
else:
write_log ("array creation failed, code: " + str(code))
return False
def mount_pos():
out = cli.mount_array(ARRAYNAME)
code = json_parser.get_response_code(out)
if code == 0:
write_log ("array mounted successfully")
return True
else:
write_log ("array mounting failed code: " + str(code))
return False
def unmount_pos():
out = cli.unmount_array(ARRAYNAME)
code = json_parser.get_response_code(out)
if code == 0:
write_log ("array unmounted successfully")
return True
else:
write_log ("array unmounting failed code: " + str(code))
return False
def create_and_mount_vol():
global VOL_CNT
vol_name = VOL_NAME_PREFIX + str(VOL_CNT)
write_log ("try to create volume, name: " + vol_name + ", size: " + str(VOL_SIZE))
out = cli.create_volume(vol_name, str(VOL_SIZE), "", "", ARRAYNAME)
code = json_parser.get_response_code(out)
if code == 0:
VOL_CNT = VOL_CNT + 1
write_log ("volume: " + vol_name + " created successfully, vol_cnt: " + str(VOL_CNT))
return mount_vol(vol_name)
else:
write_log ("volume: " + vol_name + " creation failed, code: " + str(code))
return False
def mount_vol(vol_name):
out = cli.mount_volume(vol_name, ARRAYNAME, "")
code = json_parser.get_response_code(out)
if code == 0:
write_log ("volume: " + vol_name + " mounted successfully")
return True
else:
write_log ("volume: " + vol_name + " mounting failed, code: " + str(code))
return False
def detach_data(target):
pos_util.pci_detach_and_attach(target)
time.sleep(0.1)
def add_spare(spare):
out = cli.add_device(spare, ARRAYNAME)
code = json_parser.get_response_code(out)
if code == 0:
write_log ("Spare device: " + spare + " has been added successfully")
return True
else:
write_log ("Spare device: " + spare + " adding failed, code: " + str(code))
return False
def mbr_reset():
cli.mbr_reset()
def tick_hour():
global RC
RC = do_event(ELAPSED_HOUR)
def init_test():
# pos_util.kill_process("poseidonos")
ret = start_pos()
if ret is False:
return False
scan_dev()
mbr_reset()
create_array()
ret = mount_pos()
return ret
def add_new_vol_and_do_io(cnt = 1):
for i in range(cnt):
ret = create_and_mount_vol()
if ret == False:
write_log ("failed to add volume")
return False
fio_util.start_fio(VOL_CNT - 1)
write_log (VOL_NAME_PREFIX + str(VOL_CNT - 1) + " volume is newly created and I/O is being performed there")
return True
def get_state():
out = cli.array_info(ARRAYNAME)
state = json_parser.get_state(out)
return state
def check_state(state_expected):
state = get_state()
if state == state_expected:
write_log ("current state is " + state)
return True
write_log ("current state is " + state + " but we expected " + state_expected)
return False
def get_situation():
out = cli.array_info(ARRAYNAME)
situ = json_parser.get_situation(out)
return situ
def check_situation(situ_expected):
situ = get_situation()
if situ == situ_expected:
write_log ("current situation is " + situ)
return True
write_log ("current situation is " + situ + " but we expected " + situ_expected)
return False
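# Hour-by-hour scenario driven by do_event() below (hours are logical, scaled by
# SECONDS_PER_MINUTE / MINUTES_PER_HOUR); summary added editorially from the code:
#   0      : start POS, scan devices, reset MBR, create and mount the array,
#            create the first volume and start fio on it
#   1-11   : keep adding volumes (up to MAX_VOL_CNT // 2) while I/O runs
#   12     : restart POS, remount the array and existing volumes, then create
#            the remaining volumes up to MAX_VOL_CNT
#   13     : steady-state check (NORMAL)
#   14-35  : detach DEV_1 -> array stays DEGRADED
#   36-39  : add spare DEV_4 -> REBUILDING, then back to NORMAL
#   40-47  : detach DEV_2, rebuild onto recycled DEV_1
#   48-55  : detach DEV_3, rebuild onto recycled DEV_2
#   56-71  : detach DEV_4, rebuild onto recycled DEV_3
#   TESTTIME_IN_HOUR (presumably 72): test finishes successfully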
def do_event(elapsed_hour):
if elapsed_hour == 0:
ret = init_test()
if ret == True:
if add_new_vol_and_do_io() == True:
return check_situation("NORMAL")
return False
elif elapsed_hour >= 1 and elapsed_hour <= 11:
if (VOL_CNT >= MAX_VOL_CNT // 2):
return check_situation("NORMAL")
if add_new_vol_and_do_io() == True:
return check_situation("NORMAL")
return False
elif elapsed_hour == 12:
ret = restart_pos()
if ret == True:
scan_dev()
if mount_pos() == True:
for i in range(MAX_VOL_CNT // 2):
mnt_res = mount_vol(VOL_NAME_PREFIX + str(i))
if mnt_res == False:
return False
fio_util.start_fio(i)
for i in range(MAX_VOL_CNT - (MAX_VOL_CNT// 2)):
add_new_vol_and_do_io()
return check_situation("NORMAL")
return False
# for i in range(MAX_VOL_CNT - (MAX_VOL_CNT// 2)):
# add_new_vol_and_do_io()
# return check_situation("NORMAL")
elif elapsed_hour == 13:
return check_situation("NORMAL")
elif elapsed_hour == 14:
detach_data(DEV_1)
time.sleep(10)
return check_situation("DEGRADED")
elif elapsed_hour >= 15 and elapsed_hour <= 35:
return check_situation("DEGRADED")
elif elapsed_hour == 36:
ret = add_spare(DEV_4)
time.sleep(10)
if ret == True:
return check_situation("REBUILDING")
return False
elif elapsed_hour >= 37 and elapsed_hour <= 39:
return check_situation("NORMAL") or check_situation("REBUILDING")
elif elapsed_hour == 40:
detach_data(DEV_2)
time.sleep(10)
return check_situation("DEGRADED")
elif elapsed_hour >= 41 and elapsed_hour <= 43:
return check_situation("DEGRADED")
elif elapsed_hour == 44:
ret = add_spare(DEV_1_RECYCLED)
time.sleep(10)
if ret == True:
return check_situation("REBUILDING")
return False
elif elapsed_hour >= 45 and elapsed_hour <= 47:
return check_situation("NORMAL") or check_situation("REBUILDING")
elif elapsed_hour == 48:
detach_data(DEV_3)
time.sleep(10)
return check_situation("DEGRADED")
elif elapsed_hour >= 49 and elapsed_hour <= 51:
return check_situation("DEGRADED")
elif elapsed_hour == 52:
ret = add_spare(DEV_2_RECYCLED)
time.sleep(10)
if ret == True:
return check_situation("REBUILDING")
return False
elif elapsed_hour >= 53 and elapsed_hour <= 55:
return check_situation("NORMAL") or check_situation("REBUILDING")
elif elapsed_hour == 56:
detach_data(DEV_4)
time.sleep(10)
return check_situation("DEGRADED")
elif elapsed_hour >= 57 and elapsed_hour <= 59:
return check_situation("DEGRADED")
elif elapsed_hour == 60:
ret = add_spare(DEV_3_RECYCLED)
time.sleep(10)
if ret == True:
return check_situation("REBUILDING")
return False
elif elapsed_hour >= 61 and elapsed_hour <= 71:
return check_situation("NORMAL") or check_situation("REBUILDING")
elif elapsed_hour == TESTTIME_IN_HOUR:
return True
else:
write_log ("unaddressed timing")
return False
def main(ip_addr):
pos.set_addr(ip_addr)
write_log("IPADDRESS: " + pos.TR_ADDR)
print_time()
global RC
RC = do_event(0)
if RC == True:
t = TestTimer(SECONDS_PER_MINUTE, timer_tick)
t.start()
write_log ("TEST STARTED")
t.join()
write_log ("thread joined")
if RC == False:
write_log("TEST FAILED AFTER " + str(ELAPSED_HOUR) +"h")
abort_pos()
exit(-1)
else:
write_log("TEST SUCCESS")
if exit_pos() == False:
abort_pos()
exit(0)
if __name__ == "__main__":
write_log("============== START 72 HOUR TEST ==============")
write_log("SECONDS PER MIN: " + str(SECONDS_PER_MINUTE))
write_log("MINUTES PER HOUR: " + str(MINUTES_PER_HOUR))
main(sys.argv[1])
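# Usage (inferred from main() above): the script expects the POS target IP
# address as its only argument, e.g.
#     python3 <this_script>.py 127.0.0.1    # script name and IP are placeholders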
|
the-stack_106_14003
|
"""
Contains library functions
"""
import json
import logging
import os.path
from typing import Optional
from markupsafe import escape
from galaxy import util
from galaxy.exceptions import (
AdminRequiredException,
ConfigDoesNotAllowException,
ItemAccessibilityException,
ObjectNotFound,
RequestParameterInvalidException,
)
from galaxy.model import LibraryDataset
from galaxy.tools.actions import upload_common
from galaxy.tools.parameters import populate_state
from galaxy.util.path import (
safe_contains,
safe_relpath,
unsafe_walk
)
log = logging.getLogger(__name__)
def validate_server_directory_upload(trans, server_dir):
if server_dir in [None, 'None', '']:
raise RequestParameterInvalidException("Invalid or unspecified server_dir parameter")
if trans.user_is_admin:
import_dir = trans.app.config.library_import_dir
import_dir_desc = 'library_import_dir'
if not import_dir:
raise ConfigDoesNotAllowException('"library_import_dir" is not set in the Galaxy configuration')
else:
import_dir = trans.app.config.user_library_import_dir
if not import_dir:
raise ConfigDoesNotAllowException('"user_library_import_dir" is not set in the Galaxy configuration')
if server_dir != trans.user.email:
import_dir = os.path.join(import_dir, trans.user.email)
import_dir_desc = 'user_library_import_dir'
full_dir = os.path.join(import_dir, server_dir)
unsafe = None
if safe_relpath(server_dir):
username = trans.user.username if trans.app.config.user_library_import_check_permissions else None
if import_dir_desc == 'user_library_import_dir' and safe_contains(import_dir, full_dir, allowlist=trans.app.config.user_library_import_symlink_allowlist):
for unsafe in unsafe_walk(full_dir, allowlist=[import_dir] + trans.app.config.user_library_import_symlink_allowlist, username=username):
log.error('User attempted to import a path that resolves to a path outside of their import dir: %s -> %s', unsafe, os.path.realpath(unsafe))
else:
log.error('User attempted to import a directory path that resolves to a path outside of their import dir: %s -> %s', server_dir, os.path.realpath(full_dir))
unsafe = True
if unsafe:
raise RequestParameterInvalidException("Invalid server_dir specified")
return full_dir, import_dir_desc
def validate_path_upload(trans):
if not trans.app.config.allow_library_path_paste:
raise ConfigDoesNotAllowException('"allow_path_paste" is not set to True in the Galaxy configuration file')
if not trans.user_is_admin:
raise AdminRequiredException('Uploading files via filesystem paths can only be performed by administrators')
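# The mixin below backs the library upload API. _upload_dataset() dispatches on
# the 'upload_option' parameter: 'upload_file' (client-supplied files, URLs or
# pasted text), 'upload_directory' (a server-side import directory validated by
# validate_server_directory_upload above) and 'upload_paths' (admin-only
# filesystem paths validated by validate_path_upload above).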
class LibraryActions:
"""
Mixin for controllers that provide library functionality.
"""
def _upload_dataset(self, trans, folder_id: str, replace_dataset: Optional[LibraryDataset] = None, **kwd):
# Set up the traditional tool state/params
cntrller = 'api'
tool_id = 'upload1'
message = None
file_type = kwd.get('file_type')
try:
upload_common.validate_datatype_extension(datatypes_registry=trans.app.datatypes_registry, ext=file_type)
except RequestParameterInvalidException as e:
return (400, util.unicodify(e))
tool = trans.app.toolbox.get_tool(tool_id)
state = tool.new_state(trans)
populate_state(trans, tool.inputs, kwd, state.inputs)
tool_params = state.inputs
dataset_upload_inputs = []
for input in tool.inputs.values():
if input.type == "upload_dataset":
dataset_upload_inputs.append(input)
# Library-specific params
server_dir = kwd.get('server_dir', '')
upload_option = kwd.get('upload_option', 'upload_file')
response_code = 200
if upload_option == 'upload_directory':
full_dir, import_dir_desc = validate_server_directory_upload(trans, server_dir)
message = 'Select a directory'
elif upload_option == 'upload_paths':
# Library API already checked this - following check isn't actually needed.
validate_path_upload(trans)
# Some error handling should be added to this method.
try:
# FIXME: instead of passing params here (which have been processed by util.Params()), the original kwd
# should be passed so that complex objects that may have been included in the initial request remain.
library_bunch = upload_common.handle_library_params(trans, kwd, folder_id, replace_dataset)
except Exception:
response_code = 500
message = "Unable to parse upload parameters, please report this error."
# Proceed with (mostly) regular upload processing if we're still errorless
if response_code == 200:
if upload_option == 'upload_file':
tool_params = upload_common.persist_uploads(tool_params, trans)
uploaded_datasets = upload_common.get_uploaded_datasets(trans, cntrller, tool_params, dataset_upload_inputs, library_bunch=library_bunch)
elif upload_option == 'upload_directory':
uploaded_datasets, response_code, message = self._get_server_dir_uploaded_datasets(trans, kwd, full_dir, import_dir_desc, library_bunch, response_code, message)
elif upload_option == 'upload_paths':
uploaded_datasets, response_code, message = self._get_path_paste_uploaded_datasets(trans, kwd, library_bunch, response_code, message)
if upload_option == 'upload_file' and not uploaded_datasets:
response_code = 400
message = 'Select a file, enter a URL or enter text'
if response_code != 200:
return (response_code, message)
json_file_path = upload_common.create_paramfile(trans, uploaded_datasets)
data_list = [ud.data for ud in uploaded_datasets]
job_params = {}
job_params['link_data_only'] = json.dumps(kwd.get('link_data_only', 'copy_files'))
job_params['uuid'] = json.dumps(kwd.get('uuid', None))
job, output = upload_common.create_job(trans, tool_params, tool, json_file_path, data_list, folder=library_bunch.folder, job_params=job_params)
trans.app.job_manager.enqueue(job, tool=tool)
return output
def _get_server_dir_uploaded_datasets(self, trans, params, full_dir, import_dir_desc, library_bunch, response_code, message):
dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
files = dir_response[0]
if not files:
return dir_response
uploaded_datasets = []
for file in files:
name = os.path.basename(file)
uploaded_datasets.append(self._make_library_uploaded_dataset(trans, params, name, file, 'server_dir', library_bunch))
return uploaded_datasets, 200, None
def _get_server_dir_files(self, params, full_dir, import_dir_desc):
files = []
try:
for entry in os.listdir(full_dir):
# Only import regular files
path = os.path.join(full_dir, entry)
link_data_only = params.get('link_data_only', 'copy_files')
if os.path.islink(full_dir) and link_data_only == 'link_to_files':
# If we're linking instead of copying and the
# sub-"directory" in the import dir is actually a symlink,
# dereference the symlink, but not any of its contents.
link_path = os.readlink(full_dir)
if os.path.isabs(link_path):
path = os.path.join(link_path, entry)
else:
path = os.path.abspath(os.path.join(link_path, entry))
elif os.path.islink(path) and os.path.isfile(path) and link_data_only == 'link_to_files':
# If we're linking instead of copying and the "file" in the
# sub-directory of the import dir is actually a symlink,
# dereference the symlink (one dereference only, Vasili).
link_path = os.readlink(path)
if os.path.isabs(link_path):
path = link_path
else:
path = os.path.abspath(os.path.join(os.path.dirname(path), link_path))
if os.path.isfile(path):
files.append(path)
except Exception as e:
message = f"Unable to get file list for configured {import_dir_desc}, error: {util.unicodify(e)}"
response_code = 500
return None, response_code, message
if not files:
message = f"The directory '{full_dir}' contains no valid files"
response_code = 400
return None, response_code, message
return files, None, None
def _get_path_paste_uploaded_datasets(self, trans, params, library_bunch, response_code, message):
preserve_dirs = util.string_as_bool(params.get('preserve_dirs', False))
uploaded_datasets = []
(files_and_folders, _response_code, _message) = self._get_path_files_and_folders(params, preserve_dirs)
if _response_code:
return (uploaded_datasets, _response_code, _message)
for (path, name, folder) in files_and_folders:
uploaded_datasets.append(self._make_library_uploaded_dataset(trans, params, name, path, 'path_paste', library_bunch, folder))
return uploaded_datasets, 200, None
def _get_path_files_and_folders(self, params, preserve_dirs):
problem_response = self._check_path_paste_params(params)
if problem_response:
return problem_response
files_and_folders = []
for (line, path) in self._paths_list(params):
line_files_and_folders = self._get_single_path_files_and_folders(line, path, preserve_dirs)
files_and_folders.extend(line_files_and_folders)
return files_and_folders, None, None
def _get_single_path_files_and_folders(self, line, path, preserve_dirs):
files_and_folders = []
if os.path.isfile(path):
name = os.path.basename(path)
files_and_folders.append((path, name, None))
for basedir, _dirs, files in os.walk(line):
for file in files:
file_path = os.path.abspath(os.path.join(basedir, file))
if preserve_dirs:
in_folder = os.path.dirname(file_path.replace(path, '', 1).lstrip('/'))
else:
in_folder = None
files_and_folders.append((file_path, file, in_folder))
return files_and_folders
def _paths_list(self, params):
return [(line.strip(), os.path.abspath(line.strip())) for line in params.get('filesystem_paths', '').splitlines() if line.strip()]
def _check_path_paste_params(self, params):
if params.get('filesystem_paths', '') == '':
message = "No paths entered in the upload form"
response_code = 400
return None, response_code, message
bad_paths = []
for (_, path) in self._paths_list(params):
if not os.path.exists(path):
bad_paths.append(path)
if bad_paths:
message = 'Invalid paths: "%s".' % '", "'.join(bad_paths)
response_code = 400
return None, response_code, message
return None
def _make_library_uploaded_dataset(self, trans, params, name, path, type, library_bunch, in_folder=None):
link_data_only = params.get('link_data_only', 'copy_files')
uuid_str = params.get('uuid', None)
file_type = params.get('file_type', None)
library_bunch.replace_dataset = None # not valid for these types of upload
uploaded_dataset = util.bunch.Bunch()
new_name = name
# Remove compressed file extensions, if any, but only if
# we're copying files into Galaxy's file space.
if link_data_only == 'copy_files':
if new_name.endswith('.gz'):
# str.rstrip() removes a set of characters, not a suffix, so slice the extension off instead
new_name = new_name[:-len('.gz')]
elif new_name.endswith('.zip'):
new_name = new_name[:-len('.zip')]
uploaded_dataset.name = new_name
uploaded_dataset.path = path
uploaded_dataset.type = type
uploaded_dataset.ext = None
uploaded_dataset.file_type = file_type
uploaded_dataset.dbkey = params.get('dbkey', None)
uploaded_dataset.to_posix_lines = params.get('to_posix_lines', None)
uploaded_dataset.space_to_tab = params.get('space_to_tab', None)
uploaded_dataset.tag_using_filenames = params.get('tag_using_filenames', False)
uploaded_dataset.tags = params.get('tags', None)
uploaded_dataset.purge_source = getattr(trans.app.config, 'ftp_upload_purge', True)
if in_folder:
uploaded_dataset.in_folder = in_folder
uploaded_dataset.data = upload_common.new_upload(trans, 'api', uploaded_dataset, library_bunch)
uploaded_dataset.link_data_only = link_data_only
uploaded_dataset.uuid = uuid_str
if link_data_only == 'link_to_files':
uploaded_dataset.data.link_to(path)
trans.sa_session.add_all((uploaded_dataset.data, uploaded_dataset.data.dataset))
trans.sa_session.flush()
return uploaded_dataset
def _create_folder(self, trans, parent_id, library_id, **kwd):
is_admin = trans.user_is_admin
current_user_roles = trans.get_current_user_roles()
try:
parent_folder = trans.sa_session.query(trans.app.model.LibraryFolder).get(trans.security.decode_id(parent_id))
except Exception:
parent_folder = None
# Check the library which actually contains the user-supplied parent folder, not the user-supplied
# library, which could be anything.
self._check_access(trans, is_admin, parent_folder, current_user_roles)
self._check_add(trans, is_admin, parent_folder, current_user_roles)
new_folder = trans.app.model.LibraryFolder(name=kwd.get('name', ''),
description=kwd.get('description', ''))
# We are associating the last used genome build with folders, so we always
# initialize a new folder with the default dbkey from the genome builds list,
# which is currently the unspecified build ('?').
new_folder.genome_build = trans.app.genome_builds.default_value
parent_folder.add_folder(new_folder)
trans.sa_session.add(new_folder)
trans.sa_session.flush()
# New folders default to having the same permissions as their parent folder
trans.app.security_agent.copy_library_permissions(trans, parent_folder, new_folder)
return 200, dict(created=new_folder)
def _check_access(self, trans, is_admin, item, current_user_roles):
if isinstance(item, trans.model.HistoryDatasetAssociation):
# Make sure the user has the DATASET_ACCESS permission on the history_dataset_association.
if not item:
message = f"Invalid history dataset ({escape(str(item))}) specified."
raise ObjectNotFound(message)
elif not trans.app.security_agent.can_access_dataset(current_user_roles, item.dataset) and item.history.user == trans.user:
message = f"You do not have permission to access the history dataset with id ({str(item.id)})."
raise ItemAccessibilityException(message)
else:
# Make sure the user has the LIBRARY_ACCESS permission on the library item.
if not item:
message = f"Invalid library item ({escape(str(item))}) specified."
raise ObjectNotFound(message)
elif not (is_admin or trans.app.security_agent.can_access_library_item(current_user_roles, item, trans.user)):
if isinstance(item, trans.model.Library):
item_type = 'data library'
elif isinstance(item, trans.model.LibraryFolder):
item_type = 'folder'
else:
item_type = '(unknown item type)'
message = f"You do not have permission to access the {escape(item_type)} with id ({str(item.id)})."
raise ItemAccessibilityException(message)
def _check_add(self, trans, is_admin, item, current_user_roles):
# Deny access if the user is not an admin and does not have the LIBRARY_ADD permission.
if not (is_admin or trans.app.security_agent.can_add_library_item(current_user_roles, item)):
message = f"You are not authorized to add an item to ({escape(item.name)})."
raise ItemAccessibilityException(message)
|
the-stack_106_14005
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class BatchMatrixDiagTest(tf.test.TestCase):
_use_gpu = False
def testVector(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
v_diag = tf.batch_matrix_diag(v)
self.assertEqual((3, 3), v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
def testBatchVector(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
mat_batch = np.array(
[[[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
v_batch_diag = tf.batch_matrix_diag(v_batch)
self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
tf.batch_matrix_diag(0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
v = tf.placeholder(dtype=tf.float32)
with self.assertRaisesOpError("input must be at least 1-dim"):
tf.batch_matrix_diag(v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3,), (7, 4))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
x = tf.constant(np.random.rand(*shape), np.float32)
y = tf.batch_matrix_diag(x)
error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
class BatchMatrixDiagGpuTest(BatchMatrixDiagTest):
_use_gpu = True
class BatchMatrixDiagPartTest(tf.test.TestCase):
_use_gpu = False
def testMatrix(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = tf.batch_matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
def testBatchMatrix(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
mat_batch = np.array(
[[[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
self.assertEqual(mat_batch.shape, (2, 3, 3))
mat_batch_diag = tf.batch_matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must have rank at least 2"):
tf.batch_matrix_diag_part(0)
with self.assertRaisesRegexp(ValueError, r"Dimensions .* not compatible"):
tf.batch_matrix_diag_part([[0, 1], [1, 0], [0, 0]])
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
v = tf.placeholder(dtype=tf.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
tf.batch_matrix_diag_part(v).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError("last two dimensions must be equal"):
tf.batch_matrix_diag_part(v).eval(
feed_dict={v: [[0, 1], [1, 0], [0, 0]]})
def testGrad(self):
shapes = ((3, 3), (5, 3, 3))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
x = tf.constant(np.random.rand(*shape), dtype=np.float32)
y = tf.batch_matrix_diag_part(x)
error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
y, y.get_shape().as_list())
self.assertLess(error, 1e-4)
class BatchMatrixDiagPartGpuTest(BatchMatrixDiagPartTest):
_use_gpu = True
class DiagTest(tf.test.TestCase):
def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
out = tf_ans.eval()
tf_ans_inv = tf.diag_part(expected_ans)
inv_out = tf_ans_inv.eval()
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
self.assertShapeEqual(expected_ans, tf_ans)
self.assertShapeEqual(diag, tf_ans_inv)
def testEmptyTensor(self):
x = np.array([])
expected_ans = np.empty([0, 0])
self.diagOp(x, np.int32, expected_ans)
def testRankOneIntTensor(self):
x = np.array([1, 2, 3])
expected_ans = np.array(
[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankOneFloatTensor(self):
x = np.array([1.1, 2.2, 3.3])
expected_ans = np.array(
[[1.1, 0, 0],
[0, 2.2, 0],
[0, 0, 3.3]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankTwoIntTensor(self):
x = np.array([[1, 2, 3], [4, 5, 6]])
expected_ans = np.array(
[[[[1, 0, 0], [0, 0, 0]],
[[0, 2, 0], [0, 0, 0]],
[[0, 0, 3], [0, 0, 0]]],
[[[0, 0, 0], [4, 0, 0]],
[[0, 0, 0], [0, 5, 0]],
[[0, 0, 0], [0, 0, 6]]]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankTwoFloatTensor(self):
x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
expected_ans = np.array(
[[[[1.1, 0, 0], [0, 0, 0]],
[[0, 2.2, 0], [0, 0, 0]],
[[0, 0, 3.3], [0, 0, 0]]],
[[[0, 0, 0], [4.4, 0, 0]],
[[0, 0, 0], [0, 5.5, 0]],
[[0, 0, 0], [0, 0, 6.6]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankThreeFloatTensor(self):
x = np.array([[[1.1, 2.2], [3.3, 4.4]],
[[5.5, 6.6], [7.7, 8.8]]])
expected_ans = np.array(
[[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
[[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
[[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
[[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
[[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
[[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
class DiagPartOpTest(tf.test.TestCase):
def setUp(self):
np.random.seed(0)
def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
tensor = tf.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = tf.diag_part(tensor)
inv_out = tf_ans_inv.eval()
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
def testRankTwoFloatTensor(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankTwoFloatTensorUnknownShape(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.test_session(use_gpu=False):
t = tf.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = tf.diag_part(t)
out = tf_ans.eval()
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
def testRankFourFloatTensor(self):
x = np.random.rand(2, 3, 2, 3)
i = np.arange(2)[:, None]
j = np.arange(3)
expected_ans = x[i, j, i, j]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankSixFloatTensor(self):
x = np.random.rand(2, 2, 2, 2, 2, 2)
i = np.arange(2)[:, None, None]
j = np.arange(2)[:, None]
k = np.arange(2)
expected_ans = x[i, j, k, i, j, k]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testOddRank(self):
w = np.random.rand(2)
x = np.random.rand(2, 2, 2)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
def testUnevenDimensions(self):
w = np.random.rand(2, 5)
x = np.random.rand(2, 1, 2, 3)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(tf.test.TestCase):
def testDiagGrad(self):
np.random.seed(0)
shapes = ((3,), (3,3), (3,3,3))
dtypes = (tf.float32, tf.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
y = tf.diag(x1)
error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
y, y.get_shape().as_list())
tf.logging.info("error = %f", error)
self.assertLess(error, 1e-4)
class DiagGradPartOpTest(tf.test.TestCase):
def testDiagPartGrad(self):
np.random.seed(0)
shapes = ((3,3), (3,3,3,3))
dtypes = (tf.float32, tf.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
y = tf.diag_part(x1)
error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
y, y.get_shape().as_list())
tf.logging.info("error = %f", error)
self.assertLess(error, 1e-4)
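# Editorial note (not part of the original test file): tf.batch_matrix_diag
# expands the innermost dimension into a diagonal matrix and
# tf.batch_matrix_diag_part inverts that, e.g.
#     tf.batch_matrix_diag([1., 2., 3.])  ->  [[1, 0, 0], [0, 2, 0], [0, 0, 3]]
# which is exactly what testVector and testMatrix above assert.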
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_14006
|
import math
import random
from typing import Optional, List, Iterator
import torch
from allennlp.common.util import lazy_groups_of
from allennlp.common.tqdm import Tqdm
from allennlp.data.data_loaders.data_loader import DataLoader, TensorDict
from allennlp.data.data_loaders.data_collator import DefaultDataCollator
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.vocabulary import Vocabulary
import allennlp.nn.util as nn_util
@DataLoader.register("simple", constructor="from_dataset_reader")
class SimpleDataLoader(DataLoader):
"""
A very simple `DataLoader` that is mostly used for testing.
"""
def __init__(
self,
instances: List[Instance],
batch_size: int,
*,
shuffle: bool = False,
batches_per_epoch: Optional[int] = None,
vocab: Optional[Vocabulary] = None,
) -> None:
self.instances = instances
self.batch_size = batch_size
self.shuffle = shuffle
self.batches_per_epoch = batches_per_epoch
self.vocab = vocab
self.cuda_device: Optional[torch.device] = None
self._batch_generator: Optional[Iterator[TensorDict]] = None
self.collate_fn = DefaultDataCollator()
def __len__(self) -> int:
if self.batches_per_epoch is not None:
return self.batches_per_epoch
return math.ceil(len(self.instances) / self.batch_size)
def __iter__(self) -> Iterator[TensorDict]:
if self.batches_per_epoch is None:
yield from self._iter_batches()
else:
if self._batch_generator is None:
self._batch_generator = self._iter_batches()
for i in range(self.batches_per_epoch):
try:
yield next(self._batch_generator)
except StopIteration: # data_generator is exhausted
self._batch_generator = self._iter_batches() # so refresh it
yield next(self._batch_generator)
def _iter_batches(self) -> Iterator[TensorDict]:
if self.shuffle:
random.shuffle(self.instances)
for batch in lazy_groups_of(self.iter_instances(), self.batch_size):
tensor_dict = self.collate_fn(batch)
if self.cuda_device is not None:
tensor_dict = nn_util.move_to_device(tensor_dict, self.cuda_device)
yield tensor_dict
def iter_instances(self) -> Iterator[Instance]:
for instance in self.instances:
if self.vocab is not None:
instance.index_fields(self.vocab)
yield instance
def index_with(self, vocab: Vocabulary) -> None:
self.vocab = vocab
for instance in self.instances:
instance.index_fields(self.vocab)
def set_target_device(self, device: torch.device) -> None:
self.cuda_device = device
@classmethod
def from_dataset_reader(
cls,
reader: DatasetReader,
data_path: str,
batch_size: int,
shuffle: bool = False,
batches_per_epoch: Optional[int] = None,
quiet: bool = False,
) -> "SimpleDataLoader":
instance_iter = reader.read(data_path)
if not quiet:
instance_iter = Tqdm.tqdm(instance_iter, desc="loading instances")
instances = list(instance_iter)
return cls(instances, batch_size, shuffle=shuffle, batches_per_epoch=batches_per_epoch)
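# Usage sketch (editorial addition, not part of the original module). The reader
# name and data path below are placeholders; the vocabulary step mirrors how the
# loader is typically primed before iteration:
#
#     loader = SimpleDataLoader.from_dataset_reader(
#         my_reader, "data.jsonl", batch_size=8, shuffle=True
#     )
#     vocab = Vocabulary.from_instances(loader.iter_instances())
#     loader.index_with(vocab)
#     for batch in loader:    # each batch is a TensorDict built by DefaultDataCollator
#         ...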
|
the-stack_106_14010
|
# Script Name : fileinfo.py
# Author : Not sure where I got this from
# Created : 28th November 2011
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Show file information for a given file
# get file information using os.stat()
# tested with Python24 vegsaeat 25sep2006
from __future__ import print_function
import os
import sys
import stat # index constants for os.stat()
import time
if sys.version_info >= (3, 0):
raw_input = input
try_count = 16
while try_count:
file_name = raw_input("Enter a file name: ") # pick a file you have
try_count >>= 1
try:
# open and read inside the try block so a missing file is caught and the user is re-prompted
fhand = open(file_name)
count = 0
for lines in fhand:
count = count + 1
fhand.close()
fhand = open(file_name)
inp = fhand.read()
fhand.close()
t_char = len(inp)
file_stats = os.stat(file_name)
print ("This is os.stat", file_stats)
break
except OSError:
print ("\nOSError : [%s] No such file or directory\n" % file_name)
if try_count == 0:
print ("Trial limit exceeded \nExiting program")
sys.exit()
# create a dictionary to hold file info
file_info = {
'fname': file_name,
'fsize': file_stats[stat.ST_SIZE],
'f_lm' : time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(file_stats[stat.ST_MTIME])),
'f_la' : time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(file_stats[stat.ST_ATIME])),
'f_ct' : time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(file_stats[stat.ST_CTIME])),
'no_of_lines':count,
't_char':t_char
}
print ("\nfile name =", file_info['fname'])
print ("file size =", file_info['fsize'] , "bytes")
print ("last modified =", file_info['f_lm'])
print ("last accessed =", file_info['f_la'])
print ("creation time =", file_info['f_ct'])
print ("Total number of lines are =", file_info['no_of_lines'])
print ("Total number of characters are =", file_info['t_char'])
if stat.S_ISDIR(file_stats[stat.ST_MODE]):
print ("This a directory")
else:
print ("This is not a directory\n")
print ("A closer look at the os.stat(%s) tuple:" % file_name)
print (file_stats)
print ("\nThe above tuple has the following sequence: ")
print ("""st_mode (protection bits), st_ino (inode number),
st_dev (device), st_nlink (number of hard links),
st_uid (user ID of owner), st_gid (group ID of owner),
st_size (file size, bytes), st_atime (last access time, seconds since epoch),
st_mtime (last modification time), st_ctime (time of creation, Windows)"""
)
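# Editorial note: the indexed os.stat() fields used above are also available as
# named attributes on the returned os.stat_result, which reads a little clearer:
#
#     st = os.stat(file_name)
#     print(st.st_size, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(st.st_mtime)))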
|
the-stack_106_14011
|
from typing import (Any,
List,
Tuple)
from hypothesis import strategies
from tests.utils import (BoundPortedSetsPair,
Strategy,
to_bound_ported_sets_pair)
objects = strategies.integers()
empty_lists = strategies.builds(list)
objects_lists = strategies.lists(objects)
non_empty_objects_lists = strategies.lists(objects,
min_size=1)
sets_pairs = strategies.builds(to_bound_ported_sets_pair,
objects_lists)
empty_sets_pairs = strategies.builds(to_bound_ported_sets_pair,
empty_lists)
non_empty_sets_pairs = strategies.builds(to_bound_ported_sets_pair,
non_empty_objects_lists)
def to_non_empty_sets_pairs_with_their_elements(
values: List[Any]) -> Strategy[Tuple[BoundPortedSetsPair, Any]]:
pair = to_bound_ported_sets_pair(values)
return strategies.tuples(strategies.just(pair),
strategies.sampled_from(values))
non_empty_sets_pairs_with_their_elements = non_empty_objects_lists.flatmap(
to_non_empty_sets_pairs_with_their_elements)
def to_sets_pairs_with_non_their_elements(
values: List[Any]) -> Strategy[Tuple[BoundPortedSetsPair, Any]]:
pair = to_bound_ported_sets_pair(values)
return strategies.tuples(strategies.just(pair),
objects.filter(lambda candidate
: candidate not in values)
if values
else objects)
sets_pairs_with_non_their_elements = (
objects_lists.flatmap(to_sets_pairs_with_non_their_elements))
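# Usage sketch (editorial addition): these strategies are meant to feed
# hypothesis property tests for the bound/ported set implementations. The exact
# property below is only an assumed illustration, not part of this module:
#
#     from hypothesis import given
#
#     @given(non_empty_sets_pairs_with_their_elements)
#     def test_contains_agrees(pair_with_element):
#         (bound, ported), element = pair_with_element
#         assert (element in bound) == (element in ported)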
|
the-stack_106_14012
|
from typing import Dict
from ..base_category import BaseCategory
# Leather and feather (皮革羽毛): leather-and-feather
class LeatherAndFeather(object):
def leather_and_feather() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '皮革羽毛'
list['id'] = 'leather-and-feather'
leather_and_feather = {}
leather_and_feather['leather-fur'] = '皮革、毛皮'
leather_and_feather['feather-products'] = '羽毛'
list['sub'] = leather_and_feather
return list
# Forestry and forest products (林業及林產品): forest-products
class ForestProducts(object):
def forest_products() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '林業及林產品'
list['id'] = 'forest-products'
forest_products = {}
forest_products['wood'] = '木材'
forest_products['wooden-case-pallet'] = '木箱、棧板'
forest_products['bamboo-rattan'] = '竹材、籐材'
forest_products['charcoal'] = '木炭、木柴'
list['sub'] = forest_products
return list
# Livestock and livestock products (畜牧業及產品): livestock-and-products
class LivestockAndProducts(object):
def livestock_and_products() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '畜牧業及產品'
list['id'] = 'livestock-and-products'
livestock_and_products = {}
livestock_and_products['farm-equip'] = '畜產設備'
livestock_and_products['farms'] = '畜牧場'
livestock_and_products['slaughterhouse'] = '屠宰場'
livestock_and_products['feeds'] = '飼料'
list['sub'] = livestock_and_products
return list
# Agricultural services (農事服務業): agricultural-services
class AgriculturalServices(object):
def agricultural_services() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '農事服務業'
list['id'] = 'agricultural-services'
agricultural_services = {}
agricultural_services['agriculture-service'] = '農事服務'
list['sub'] = agricultural_services
return list
# Agriculture / farming (農業): agriculture
class Agriculture(object):
def agriculture() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '農業'
list['id'] = 'agriculture'
agriculture = {}
agriculture['orchards-farms'] = '果園、農場'
agriculture['fertilizer'] = '肥料'
agriculture['agriculture-equip'] = '農耕設備'
agriculture['seeds'] = '種子'
list['sub'] = agriculture
return list
# Fisheries and fishery products (漁業及漁產品): fisheries-and-products
class FisheriesAndProducts(object):
def fisheries_and_products() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '漁業及漁產品'
list['id'] = 'fisheries-and-products'
fisheries_and_products = {}
fisheries_and_products['aquaculture'] = '水產養殖'
fisheries_and_products['aquaculture-equipment'] = '水產養殖用品'
fisheries_and_products['fishery'] = '漁產捕撈'
list['sub'] = fisheries_and_products
return list
# Mining and minerals (礦業及礦產品): mining-and-minerals
class MiningAndMinerals(object):
def mining_and_minerals() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '礦業及礦產品'
list['id'] = 'mining-and-minerals'
mining_and_minerals = {}
mining_and_minerals['quarry'] = '石礦'
mining_and_minerals['coal'] = '煤礦、煤炭'
mining_and_minerals['mining'] = '採礦'
mining_and_minerals['mineral'] = '礦物'
list['sub'] = mining_and_minerals
return list
# Agriculture, forestry, fishery and animal husbandry (農林漁牧): agriculture
# Note: this top-level category class reuses the name "Agriculture", shadowing the
# farming sub-category mixin of the same name defined above, which it also inherits from.
class Agriculture(BaseCategory, LeatherAndFeather, ForestProducts, LivestockAndProducts,
AgriculturalServices, Agriculture, FisheriesAndProducts, MiningAndMinerals):
category_name = '農林漁牧'
category_id = 'agriculture'
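# Usage sketch (editorial addition): each helper returns a dict with 'name',
# 'id' and 'sub'; since the methods take no self, they are called directly on
# the class. The aggregation below is only an assumed illustration:
#
#     tree = {
#         'name': Agriculture.category_name,
#         'id': Agriculture.category_id,
#         'children': [
#             Agriculture.leather_and_feather(),
#             Agriculture.forest_products(),
#             Agriculture.mining_and_minerals(),
#         ],
#     }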
|