code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
# encoding: utf-8
import sqlite3
from datetime import datetime
class DB:
def connect():
conn = sqlite3.connect("sektor.db")
return conn
def init():
conn = DB.connect()
cursor = conn.cursor()
try:
cursor.execute(
"""CREATE TABLE IF NOT EXISTS track (time INTEGER,
lat DOUBLE PRECISION, lon DOUBLE PRECISION,
speed INT, distance INT, oil BOOLEAN, created_at DATETIME)
"""
)
conn.commit()
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS km_for_oil (id INT AUTO_INCREMENT,
counter INTEGER)
"""
)
conn.commit()
finally:
conn.close()
return True
def update_km_for_oil(distance):
conn = DB.connect()
cursor = conn.cursor()
try:
result = cursor.execute(
"""
UPDATE counter set
"""
).fetchone()
return result[0] if result else False
except Exception as ex:
print("Exception: ", ex)
return False
def get_last_oil_counter():
conn = DB.connect()
cursor = conn.cursor()
try:
result = cursor.execute(
"""
SELECT counter FROM km_for_oil
"""
).fetchone()
return result[0] if result else False
except Exception as ex:
print("Exception: ", ex)
return False
def find_last_position():
conn = DB.connect()
cursor = conn.cursor()
try:
result = cursor.execute(
"""
SELECT
track. `time`,
track.lat,
track.lon,
track.speed,
track.distance,
track.oil
FROM
track
ORDER BY
created_at DESC
LIMIT 1
"""
).fetchone()
return result if result else False
except Exception as ex:
print("Exception on DB.find_last_position()")
print("Exception: ", ex)
return False
finally:
conn.close()
return True
def find_last_oil():
conn = DB.connect()
cursor = cursor.execute("""SELECT * FROM track WHERE distance > 300 """)
return cursor.fetchone()
def save(time, lat, lon, speed, distance, oil=False):
conn = DB.connect()
cursor = conn.cursor()
created_at = datetime.now()
try:
cursor.execute(
"""INSERT INTO track VALUES (?,?,?,?,?,?,?)""",
(time, lat, lon, speed, distance, oil, created_at),
)
conn.commit()
except Exception as ex:
print("Exception on DB.save()")
print("Exception: ", ex) # TO-DO: Use logger
return False
finally:
conn.close()
return True
|
[
"sqlite3.connect",
"datetime.datetime.now"
] |
[((109, 137), 'sqlite3.connect', 'sqlite3.connect', (['"""sektor.db"""'], {}), "('sektor.db')\n", (124, 137), False, 'import sqlite3\n'), ((2726, 2740), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2738, 2740), False, 'from datetime import datetime\n')]
|
import argparse
import pickle
try:
from duck.steps.parametrize import prepare_system
from duck.utils.cal_ints import find_interaction
from duck.steps.equlibrate import do_equlibrate
from duck.utils.check_system import check_if_equlibrated
except ModuleNotFoundError:
print('Dependencies missing; check openmm, pdbfixer, and yank are installed from Omnia.')
def main():
parser = argparse.ArgumentParser(description='Prepare system for dynamic undocking')
parser.add_argument('-p', '--protein', help='Apoprotein in PDB format')
parser.add_argument('-l', '--ligand', help='Ligand in mol format')
# parser.add_argument('-o', '--output', help="PDB output")
parser.add_argument('-c', '--chunk', help='Chunked protein')
parser.add_argument('-i', '--interaction', help='Protein atom to use for ligand interaction.')
parser.add_argument('-s', '--seed', type=int, help='Random seed.')
parser.add_argument('--gpu-id', type=int, help='GPU ID (optional); if not specified, runs on CPU only.')
parser.add_argument('--force-constant-eq', type=float, default=1.0, help='Force constant for equilibration.')
args = parser.parse_args()
# Parameterize the ligand
prepare_system(args.ligand, args.chunk)
# Now find the interaction and save to a file
results = find_interaction(args.interaction, args.protein)
print(results) # what happens to these?
with open('complex_system.pickle', 'rb') as f:
p = pickle.load(f) + results
with open('complex_system.pickle', 'wb') as f:
pickle.dump(p, f, protocol=pickle.HIGHEST_PROTOCOL)
# pickle.dump(l, 'complex_system.pickle')
# Now do the equlibration
do_equlibrate(force_constant_equilibrate=args.force_constant_eq, gpu_id=args.gpu_id)
if not check_if_equlibrated("density.csv", 1):
raise EquilibrationError("System is not equilibrated.")
if __name__ == "__main__":
main()
|
[
"pickle.dump",
"argparse.ArgumentParser",
"duck.utils.cal_ints.find_interaction",
"duck.utils.check_system.check_if_equlibrated",
"duck.steps.parametrize.prepare_system",
"pickle.load",
"duck.steps.equlibrate.do_equlibrate"
] |
[((404, 479), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Prepare system for dynamic undocking"""'}), "(description='Prepare system for dynamic undocking')\n", (427, 479), False, 'import argparse\n'), ((1215, 1254), 'duck.steps.parametrize.prepare_system', 'prepare_system', (['args.ligand', 'args.chunk'], {}), '(args.ligand, args.chunk)\n', (1229, 1254), False, 'from duck.steps.parametrize import prepare_system\n'), ((1319, 1367), 'duck.utils.cal_ints.find_interaction', 'find_interaction', (['args.interaction', 'args.protein'], {}), '(args.interaction, args.protein)\n', (1335, 1367), False, 'from duck.utils.cal_ints import find_interaction\n'), ((1700, 1789), 'duck.steps.equlibrate.do_equlibrate', 'do_equlibrate', ([], {'force_constant_equilibrate': 'args.force_constant_eq', 'gpu_id': 'args.gpu_id'}), '(force_constant_equilibrate=args.force_constant_eq, gpu_id=\n args.gpu_id)\n', (1713, 1789), False, 'from duck.steps.equlibrate import do_equlibrate\n'), ((1559, 1610), 'pickle.dump', 'pickle.dump', (['p', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(p, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (1570, 1610), False, 'import pickle\n'), ((1796, 1834), 'duck.utils.check_system.check_if_equlibrated', 'check_if_equlibrated', (['"""density.csv"""', '(1)'], {}), "('density.csv', 1)\n", (1816, 1834), False, 'from duck.utils.check_system import check_if_equlibrated\n'), ((1475, 1489), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1486, 1489), False, 'import pickle\n')]
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''测试报告
'''
import sys
import codecs
import cgi
import socket
import os
import shutil
import json
import getpass
import string
import locale
import argparse
import pkg_resources
import xml.dom.minidom as dom
import xml.sax.saxutils as saxutils
from datetime import datetime
from testbase import testresult
from testbase.testresult import EnumLogLevel
REPORT_ENTRY_POINT = "qtaf.report"
report_types = {}
os_encoding = locale.getdefaultlocale()[1]
report_usage = 'runtest <test ...> --report-type <report-type> [--report-args "<report-args>"]'
def _to_unicode( s ):
'''将任意字符串转换为unicode编码
'''
if isinstance(str, unicode):
return s
try:
return s.decode('utf8')
except UnicodeDecodeError:
return s.decode(os_encoding)
class ITestReport(object):
'''测试报告接口
'''
def begin_report(self):
'''开始测试执行
'''
pass
def end_report(self):
'''结束测试执行
:param passed: 测试是否通过
:type passed: boolean
'''
pass
def log_test_result(self, testcase, testresult ):
'''记录一个测试结果
:param testcase: 测试用例
:type testcase: TestCase
:param testresult: 测试结果
:type testresult: TestResult
'''
pass
def log_record(self, level, tag, msg, record):
'''增加一个记录
:param level: 日志级别
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type level: string
:type tag: string
:type msg: string
:type record: dict
'''
pass
def log_loaded_tests(self, loader, testcases):
'''记录加载成功的用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcases: 测试用例列表
:type testcases: list
'''
pass
def log_filtered_test(self, loader, testcase, reason):
'''记录一个被过滤的测试用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcase: 测试用例
:type testcase: TestCase
:param reason: 过滤原因
:type reason: str
'''
pass
def log_load_error(self, loader, name, error):
'''记录一个加载失败的用例或用例集
:param loader: 用例加载器
:type loader: TestLoader
:param name: 名称
:type name: str
:param error: 错误信息
:type error: str
'''
pass
def log_test_target(self, test_target):
'''记录被测对象
:param test_target: 被测对象详情
:type test_target: any
'''
pass
def log_resource(self, res_type, resource):
'''记录测试使用的资源
:param res_type: 资源类型
:type res_type: str
:param resource: 资源详情
:type resource: dict
'''
pass
def get_testresult_factory(self):
'''获取对应的TestResult工厂
:returns ITestResultFactory
'''
raise NotImplementedError()
def debug(self, tag, msg, record=None):
'''记录一个DEBUG日志
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type tag: string
:type msg: string
:type record: dict
'''
if record is None:
record = {}
self.log_record(EnumLogLevel.DEBUG, tag, msg, record)
def info(self, tag, msg, record=None):
'''记录一个INFO日志
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type tag: string
:type msg: string
:type record: dict
'''
if record is None:
record = {}
self.log_record(EnumLogLevel.INFO, tag, msg, record)
def warning(self, tag, msg, record=None):
'''记录一个WARN日志
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type tag: string
:type msg: string
:type record: dict
'''
if record is None:
record = {}
self.log_record(EnumLogLevel.WARNING, tag, msg, record)
def error(self, tag, msg, record=None):
'''记录一个ERROR日志
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type tag: string
:type msg: string
:type record: dict
'''
if record is None:
record = {}
self.log_record(EnumLogLevel.ERROR, tag, msg, record)
def critical(self, tag, msg, record=None):
'''记录一个CRITICAL日志
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type tag: string
:type msg: string
:type record: dict
'''
if record is None:
record = {}
self.log_record(EnumLogLevel.CRITICAL, tag, msg, record)
@classmethod
def get_parser(cls):
'''获取命令行参数解析器(如果实现)
:returns: 解析器对象
:rtype: argparse.ArgumentParser
'''
raise NotImplementedError()
@classmethod
def parse_args(cls, args_string):
'''通过命令行参数构造对象
:returns: 测试报告
:rtype: cls
'''
raise NotImplementedError()
class ITestResultFactory(object):
'''TestResult工厂接口
'''
def create(self, testcase ):
'''创建TestResult对象
:param testcase: 测试用例
:type testcase: TestCase
:return TestResult
'''
raise NotImplementedError()
def dumps(self):
'''序列化
:return picklable object
'''
pass
def loads(self, buf):
'''反序列化
:param buf: dumps返回的序列化后的数据
:type buf: object
'''
pass
class EmptyTestResultFactory(ITestResultFactory):
'''测试结果工厂
'''
def __init__(self, result_factory_func=None ):
'''构造函数
:param result_factory_func: TestResult工厂函数
:type result_factory_func: Function
'''
self._result_factory_func = result_factory_func
def create(self, testcase ):
'''创建TestResult对象
:param testcase: 测试用例
:type testcase: TestCase
:return TestResult
'''
if self._result_factory_func is None:
return testresult.EmptyResult()
else:
return self._result_factory_func(testcase)
def dumps(self):
'''序列化
:return picklable object
'''
return self._result_factory_func
def loads(self, buf):
'''反序列化
:param buf: dumps返回的序列化后的数据
:type buf: object
'''
self._result_factory_func = buf
class EmptyTestReport(ITestReport):
'''不输出测试报告
'''
def __init__(self, result_factory_func=None ):
'''构造函数
:param result_factory_func: TestResult工厂函数
:type result_factory_func: callable
'''
self._result_factory_func = result_factory_func
self._is_passed = True
def get_testresult_factory(self):
'''获取对应的TestResult工厂
:returns ITestResultFactory
'''
return EmptyTestResultFactory(self._result_factory_func)
def log_test_result(self, testcase, testresult ):
'''记录一个测试结果
:param testcase: 测试用例
:type testcase: TestCase
:param testresult: 测试结果
:type testresult: TestResult
'''
if not testresult.passed:
self._is_passed = False
@property
def passed(self):
'''测试是否通过
'''
return self._is_passed
@classmethod
def get_parser(cls):
'''获取命令行参数解析器(如果实现)
:returns: 解析器对象
:rtype: argparse.ArgumentParser
'''
return argparse.ArgumentParser(usage=report_usage)
@classmethod
def parse_args(cls, args_string):
'''通过命令行参数构造对象
:returns: 测试报告
:rtype: cls
'''
return EmptyTestReport()
class StreamTestResultFactory(ITestResultFactory):
'''流形式TestResult工厂
'''
def __init__(self, stream ):
'''构造函数
:param stream: 指定要输出的流设备
:type stream: file
'''
self._stream = stream
def create(self, testcase ):
'''创建TestResult对象
:param testcase: 测试用例
:type testcase: TestCase
:return TestResult
'''
return testresult.StreamResult(self._stream)
def dumps(self):
'''序列化
:return picklable object
'''
fileno = self._stream.fileno()
if fileno not in [0, 1]:
raise ValueError("不支持的流对象: %s" % self._stream)
return fileno
def loads(self, buf):
'''反序列化
:param buf: dumps返回的序列化后的数据
:type buf: object
'''
fileno = buf
if fileno == 1:
self._stream = sys.stdout
elif fileno == 2:
self._stream = sys.stderr
else:
raise ValueError("invalid fd: %s" % fileno )
class StreamTestReport(ITestReport):
'''流形式的测试报告
'''
def __init__(self, stream=sys.stdout, error_stream=sys.stderr, output_testresult=False, output_summary=True ):
'''构造函数
:param stream: 指定要输出的流设备
:type stream: file
:param output_testresult: 是否输出测试用例执行的日志
:type output_testresult: boolean
:param output_summary: 是否输出执行汇总信息
:type output_summary: boolean
'''
self._stream = stream
self._err_stream = error_stream
self._output_testresult = output_testresult
self._output_summary = output_summary
if stream.encoding and stream.encoding != 'utf8':
self._write = lambda x: self._stream.write(x.decode('utf8').encode(stream.encoding))
self._write_err = lambda x: self._err_stream.write(x.decode('utf8').encode(stream.encoding))
else:
self._write = self._stream.write
self._write_err = self._err_stream.write
self._passed_testresults = []
self._failed_testresults = []
def begin_report(self):
'''开始测试执行
'''
self._start_time = datetime.now()
self._write("Test runs at:%s.\n" % self._start_time.strftime("%Y-%m-%d %H:%M:%S"))
def end_report(self):
'''结束测试执行
:param passed: 测试是否通过
:type passed: boolean
'''
end_time = datetime.now()
self._write("Test ends at:%s.\n" % end_time.strftime("%Y-%m-%d %H:%M:%S"))
#self._write("Total execution time is :%s\n" % str(end_time-self._start_time).split('.')[0])
if self._output_summary:
self._write("\n" + "="*60 + "\n")
self._write("SUMMARY:\n\n")
self._write(" Totals: %s\t%0.4fs\n\n" % (len(self._failed_testresults) + len(self._passed_testresults),
(end_time-self._start_time).total_seconds()))
self._write(" Passed: %s\n" % len(self._passed_testresults))
for it in self._passed_testresults:
self._write(" \t%s\t%0.4fs\n" % (it.testcase.test_name,
it.end_time-it.begin_time))
self._write("\n")
self._write(" Failed: %s\n" % len(self._failed_testresults))
for it in self._failed_testresults:
self._write_err(" \t%s\t%0.4fs\n" % (it.testcase.test_name,
it.end_time-it.begin_time))
def log_test_result(self, testcase, testresult ):
'''记录一个测试结果
:param testcase: 测试用例
:type testcase: TestCase
:param testresult: 测试结果
:type testresult: TestResult
'''
if testresult.passed:
self._passed_testresults.append(testresult)
else:
self._failed_testresults.append(testresult)
self._write("run test case: %s(pass?:%s)\n" % (testcase.test_name, testresult.passed))
def log_record(self, level, tag, msg, record={}):
'''增加一个记录
:param level: 日志级别
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type level: string
:type tag: string
:type msg: string
:type record: dict
'''
self._write("%s\n" % (msg))
def log_filtered_test(self, loader, testcase, reason):
'''记录一个被过滤的测试用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcase: 测试用例
:type testcase: TestCase
:param reason: 过滤原因
:type reason: str
'''
self._write("filtered test case: %s (reason: %s)\n" % (testcase.test_name, reason))
def log_load_error(self, loader, name, error):
'''记录一个加载失败的用例或用例集
:param loader: 用例加载器
:type loader: TestLoader
:param name: 名称
:type name: str
:param error: 错误信息
:type error: str
'''
line = ""
for line in reversed(error.split("\n")):
if line.strip():
break
self._write_err("load test failed: %s (error: %s)\n" % (name, line))
def get_testresult_factory(self):
'''获取对应的TestResult工厂
:returns ITestResultFactory
'''
if self._output_testresult:
return StreamTestResultFactory(self._stream)
else:
return EmptyTestResultFactory()
@classmethod
def get_parser(cls):
'''获取命令行参数解析器(如果实现)
:returns: 解析器对象
:rtype: argparse.ArgumentParser
'''
parser = argparse.ArgumentParser(usage=report_usage)
parser.add_argument("--no-output-result", action="store_true", help="don't output detail result of test cases")
parser.add_argument("--no-summary", action="store_true", help="don't output summary information")
return parser
@classmethod
def parse_args(cls, args_string):
'''通过命令行参数构造对象
:returns: 测试报告
:rtype: cls
'''
args = cls.get_parser().parse_args(args_string)
return cls(
output_testresult=not args.no_output_result,
output_summary=not args.no_summary)
REPORT_XSL = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/RunResult">
<html>
<head>
<style>
*{
font-size:12px;
font-family: '宋体' , 'Courier New', Arial, 'Arial Unicode MS', '';
}
.title
{
font-size:14px;
font-weight: bold;
margin: 20px auto 5px auto;
}
table{
border:solid 1px #0099CC;
border-collapse:collapse;
margin: 0px auto;
}
td
{
border:solid 1px #0099CC;
padding: 6px 6px;
}
.td_Title
{
color:#FFF;
font-weight: bold;
background-color:#66CCFF;
}
.tr_pass
{
background-color:#B3E8B8;
}
.tr_fail
{
background-color:#F5BCBD;
}
.success
{
color:#0000FF;
}
.fail
{
color:#FF0000;
}
.exception
{
color:#00AA00;
}
</style>
</head>
<body>
<div class='title'>
<td>测试报告链接:</td>
<td><a><xsl:attribute name="href"><xsl:value-of select="TestReportLink/Url"/></xsl:attribute>点击这里</a></td>
</div>
<div class='title'>测试运行环境:</div>
<table>
<tr>
<td class='td_Title'>主机名</td>
<td><xsl:value-of select="TestEnv/PC"/></td>
</tr>
<tr>
<td class='td_Title'>操作系统</td>
<td><xsl:value-of select="TestEnv/OS"/></td>
</tr>
</table>
<div class='title'>测试运行时间:</div>
<table>
<tr>
<td class='td_Title'>Run开始时间</td>
<td><xsl:value-of select="RunTime/StartTime"/></td>
</tr>
<tr>
<td class='td_Title'>Run结束时间</td>
<td><xsl:value-of select="RunTime/EndTime"/></td>
</tr>
<tr>
<td class='td_Title'>Run执行时间</td>
<td><xsl:value-of select="RunTime/Duration"/></td>
</tr>
</table>
<div class='title'>测试用例汇总:</div>
<table>
<tr>
<td class='td_Title'>用例总数</td>
<td class='td_Title'>通过用例数</td>
<td class='td_Title'>失败用例数</td>
</tr>
<tr>
<td>
<xsl:value-of select="count(TestResult)"/>
</td>
<td>
<xsl:value-of select="count(TestResult[@result='True'])"/>
</td>
<td>
<xsl:value-of select="count(TestResult[@result='False'])"/>
</td>
</tr>
</table>
<div class='title'>加载失败模块:</div>
<table>
<tr>
<td class='td_Title'>模块名</td>
<td class='td_Title'>失败Log</td>
</tr>
<tr>
<xsl:for-each select="LoadTestError">
<tr>
<td><xsl:value-of select="@name"/></td>
<td><a><xsl:attribute name="href">
<xsl:value-of select="@log"/>
</xsl:attribute>
Log
</a></td>
</tr>
</xsl:for-each>
</tr>
</table>
<div class='title'>测试用例详细信息:</div>
<table>
<tr>
<td class='td_Title'>测试结果</td>
<td class='td_Title'>测试用例</td>
<td class='td_Title'>负责人</td>
<td class='td_Title'>用例描述</td>
<td class='td_Title'>用例状态</td>
<td class='td_Title'>用例Log</td>
</tr>
<xsl:for-each select="TestResult">
<xsl:if test="@result='False'">
<tr class='tr_fail'>
<td>失败</td>
<td><xsl:value-of select="@name"/></td>
<td><xsl:value-of select="@owner"/></td>
<td><xsl:value-of select="."/></td>
<td><xsl:value-of select="@status"/></td>
<td><a><xsl:attribute name="href">
<xsl:value-of select="@log"/>
</xsl:attribute>
Log
</a></td>
</tr>
</xsl:if>
<xsl:if test="@result='True'">
<tr class='tr_pass'>
<td>通过</td>
<td><xsl:value-of select="@name"/></td>
<td><xsl:value-of select="@owner"/></td>
<td><xsl:value-of select="."/></td>
<td><xsl:value-of select="@status"/></td>
<td><a><xsl:attribute name="href">
<xsl:value-of select="@log"/>
</xsl:attribute>
Log
</a></td>
</tr>
</xsl:if>
</xsl:for-each>
</table>
</body>
</html>
</xsl:template>
</xsl:stylesheet>"""
RESULT_XLS = """<?xml version="1.0" encoding="utf-8"?><!-- DWXMLSource="tmp/qqtest.hello.HelloW.xml" --><!DOCTYPE xsl:stylesheet [
<!ENTITY nbsp " ">
<!ENTITY copy "©">
<!ENTITY reg "®">
<!ENTITY trade "™">
<!ENTITY mdash "—">
<!ENTITY ldquo "“">
<!ENTITY rdquo "”">
<!ENTITY pound "£">
<!ENTITY yen "¥">
<!ENTITY euro "€">
]>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:strip-space elements="*"/>
<xsl:template match="/TEST">
<html>
<head>
<style>
*{
font-size:12px;
font-family: '宋体' , 'Courier New', Arial, 'Arial Unicode MS', '';
}
.title
{
font-size:14px;
font-weight: bold;
margin: 20px auto 5px auto;
}
.subtable{
border:solid 1px #0099CC;
border-collapse:collapse;
margin: 0px auto auto 0px;
}
.subtable td
{
border:solid 1px #0099CC;
padding: 6px 6px;
}
.td_title
{
color:#FFF;
font-weight: bold;
background-color:#66CCFF;
}
.tr_pass
{
background-color:#B3E8B8;
}
.tr_fail
{
background-color:#F5BCBD;
}
.suc_step_title
{
background-color:#B3E8B8;
padding:2px 2px
}
.STYLE1 {font-size: 16px}
.STYLE3 {font-size: 14px; color:#666666;}
.STYLE4 {color: #999999}
.STYLE5 {
color: #FF0000;
font-weight: bold;
}
.STYLE6 {
color: #FF9900;
font-weight: bold;
}
</style>
</head>
<body>
<div>
<table class="subtable">
<tr>
<td class='td_title'>用例名字:</td>
<td><xsl:value-of select="@name"/></td>
<td class='td_title'>运行结果:</td>
<td>
<span>
<xsl:attribute name="style">
<xsl:if test="@result='True'">color: #00FF00</xsl:if>
<xsl:if test="@result='False'">color: #FF0000</xsl:if>
</xsl:attribute>
<xsl:apply-templates select="@result"/>
</span>
</td>
</tr>
<tr>
<td class='td_title'>开始时间:</td>
<td><xsl:value-of select="@begintime"/></td>
<td class='td_title'>负责人:</td>
<td><xsl:value-of select="@owner"/></td>
</tr>
<tr>
<td class='td_title'>结束时间:</td>
<td><xsl:value-of select="@endtime"/></td>
<td class='td_title'>优先级:</td>
<td><xsl:value-of select="@priority"/></td>
</tr>
<tr>
<td class="td_title">运行时间:</td>
<td><xsl:value-of select="@duration"/></td>
<td class='td_title'>用例超时:</td>
<td><xsl:value-of select="@timeout"/>分钟</td>
</tr>
</table>
</div>
<xsl:apply-templates/>
</body>
</html>
</xsl:template>
<xsl:template name="break_lines">
<xsl:param name="text" select="string(.)"/>
<xsl:choose>
<xsl:when test="contains($text, '
')">
<xsl:value-of select="substring-before($text, '
')"/>
<br/>
<xsl:call-template name="break_lines">
<xsl:with-param
name="text"
select="substring-after($text, '
')"
/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$text"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="@result">
<xsl:if test=".='True'">通过</xsl:if>
<xsl:if test=".='False'">失败</xsl:if>
</xsl:template>
<xsl:template match="STEP">
<hr />
<div>
<xsl:if test="@result='True'">
<xsl:attribute name="style">
padding:2px 2px; background-color:#B3E8B8
</xsl:attribute>
</xsl:if>
<xsl:if test="@result='False'">
<xsl:attribute name="style">
padding:2px 2px; background-color:#F5BCBD
</xsl:attribute>
</xsl:if>
<table border="0">
<tr>
<td><span class="STYLE1">步骤:</span></td>
<td><span class="STYLE1"><xsl:value-of select="@title"/></span></td>
<td><span class="STYLE1"> <xsl:value-of select="@time"/></span></td>
<td><span class="STYLE1">
<xsl:apply-templates select="@result"/>
</span></td>
</tr>
</table>
</div>
<hr />
<table>
<xsl:apply-templates/>
</table>
</xsl:template>
<xsl:template match="DEBUG">
<tr>
<td valign="top"><strong>DEBUG:</strong></td>
<td><xsl:value-of select="text()"/></td>
</tr>
</xsl:template>
<xsl:template match="INFO">
<tr>
<!--<td valign="top"><span class="STYLE4">12:12:11</span></td> -->
<td valign="top"><strong>INFO:</strong></td>
<td><xsl:value-of select="text()"/></td>
</tr>
</xsl:template>
<xsl:template match="WARNING">
<tr>
<!--<td valign="top"><span class="STYLE4">12:12:11</span></td> -->
<td valign="top"><span class="STYLE6">WARNING:</span></td>
<td><xsl:value-of select="text()"/></td>
</tr>
</xsl:template>
<xsl:template match="ERROR">
<tr>
<!--<td valign="top"><span class="STYLE4">12:12:11</span></td> -->
<td valign="top"><span class="STYLE5">ERROR:</span></td>
<td>
<xsl:call-template name="break_lines" />
<pre>
<xsl:value-of select="EXCEPT/text()"/>
</pre>
<table border="0">
<xsl:apply-templates select="EXPECT"/>
<xsl:apply-templates select="ACTUAL"/>
</table>
<xsl:for-each select="ATTACHMENT">
<a>
<xsl:attribute name="href">
<xsl:value-of select="@filepath"/>
</xsl:attribute>
[<xsl:value-of select="text()"/>]
</a>
</xsl:for-each>
</td>
</tr>
</xsl:template>
<xsl:template match="EXPECT">
<tr>
<td> 期望值:</td>
<td><xsl:value-of select="text()"/></td>
</tr>
</xsl:template>
<xsl:template match="ACTUAL">
<tr>
<td> 实际值:</td>
<td><xsl:value-of select="text()"/></td>
</tr>
</xsl:template>
</xsl:stylesheet>"""
class XMLTestResultFactory(ITestResultFactory):
'''XML形式TestResult工厂
'''
BAD_CHARS = r'\/*?:<>"|~'
TRANS = string.maketrans(BAD_CHARS, '='*len(BAD_CHARS))
def create(self, testcase ):
'''创建TestResult对象
:param testcase: 测试用例
:type testcase: TestCase
:return TestResult
'''
time_str=datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
filename = '%s_%s.xml' % (testcase.test_name.translate(self.TRANS),time_str)
return testresult.XmlResult(filename)
class XMLTestReport(ITestReport):
'''XML形式的测试报告
'''
def __init__(self):
'''构造函数
'''
self._xmldoc = dom.Document()
self._xmldoc.appendChild(self._xmldoc.createProcessingInstruction("xml-stylesheet", 'type="text/xsl" href="TestReport.xsl"'))
self._runrstnode = self._xmldoc.createElement("RunResult")
self._xmldoc.appendChild(self._runrstnode)
self._result_factory = XMLTestResultFactory()
def begin_report(self):
'''开始测试执行
'''
self._time_start = datetime.now()
xmltpl = "<TestEnv><PC>%s</PC><OS>%s</OS></TestEnv>"
hostname = socket.gethostname()
if sys.platform == 'win32':
osver = os.popen("ver").read().decode('gbk').encode('utf-8')
else:
osver = os.uname() # @UndefinedVariable
envxml = dom.parseString(xmltpl % (hostname, osver))
self._runrstnode.appendChild(envxml.childNodes[0])
def end_report(self):
'''结束测试执行
:param passed: 测试是否通过
:type passed: boolean
'''
time_end = datetime.now()
timexml = "<RunTime><StartTime>%s</StartTime><EndTime>%s</EndTime><Duration>%s</Duration></RunTime>"
timexml = timexml % (self._time_start.strftime("%Y-%m-%d %H:%M:%S"), time_end.strftime("%Y-%m-%d %H:%M:%S"), str(time_end-self._time_start).split('.')[0] )
timenodes = dom.parseString(timexml)
self._runrstnode.appendChild(timenodes.childNodes[0])
xmldata = self._xmldoc.toprettyxml(indent=" ",
newl="\n",
encoding='utf-8')
with codecs.open('TestReport.xml', 'w') as fd:
fd.write(xmldata)
with codecs.open('TestReport.xsl', 'w') as fd:
fd.write(REPORT_XSL)
with codecs.open('TestResult.xsl', 'w') as fd:
fd.write(RESULT_XLS)
def log_test_result(self, testcase, testresult ):
'''记录一个测试结果
:param testcase: 测试用例
:type testcase: TestCase
:param testresult: 测试结果
:type testresult: XmlResult
'''
casemark = cgi.escape(testcase.test_doc)
nodestr = """<TestResult result="%s" log="%s" status="%s">%s</TestResult>
""" % (testresult.passed, testresult.file_path, testcase.status, casemark)
doc2 = dom.parseString(nodestr)
resultNode = doc2.childNodes[0]
resultNode.setAttribute("name", _to_unicode(saxutils.escape(testcase.test_name)))
resultNode.setAttribute("owner", _to_unicode(saxutils.escape(testcase.owner)))
self._runrstnode.appendChild(resultNode)
def log_record(self, level, tag, msg, record={}):
'''增加一个记录
:param level: 日志级别
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type level: string
:type tag: string
:type msg: string
:type record: dict
'''
if tag == 'LOADER' and level == EnumLogLevel.ERROR:
if record.has_key('error_testname') and record.has_key('error'):
testname = record['error_testname']
mdfailsnode = self._xmldoc.createElement("LoadFailure")
self._runrstnode.appendChild(mdfailsnode)
logfile = '%s.log' % testname
xmltpl = """<Module name="%s" log="%s"/>""" % (testname, logfile)
mdfailsnode.appendChild(dom.parseString(xmltpl).childNodes[0])
with open(logfile, 'w') as fd:
fd.write(record['error'])
def log_filtered_test(self, loader, testcase, reason):
'''记录一个被过滤的测试用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcase: 测试用例
:type testcase: TestCase
:param reason: 过滤原因
:type reason: str
'''
nodestr = """<FilterTest name="%s" reason="%s"></FilterTest>
""" % (
_to_unicode(saxutils.escape(testcase.test_name)),
_to_unicode(saxutils.escape(reason))
)
doc2 = dom.parseString(nodestr)
filterNode = doc2.childNodes[0]
self._runrstnode.appendChild(filterNode)
def log_load_error(self, loader, name, error):
'''记录一个加载失败的用例或用例集
:param loader: 用例加载器
:type loader: TestLoader
:param name: 名称
:type name: str
:param error: 错误信息
:type error: str
'''
log_file = "%s.log" % name
nodestr = """<LoadTestError name="%s" log="%s"></LoadTestError>
""" % (
_to_unicode(saxutils.escape(name)),
log_file,
)
doc2 = dom.parseString(nodestr)
errNode = doc2.childNodes[0]
self._runrstnode.appendChild(errNode)
with open(log_file, 'w') as fd:
fd.write(error)
def get_testresult_factory(self):
'''获取对应的TestResult工厂
:returns ITestResultFactory
'''
return self._result_factory
@classmethod
def get_parser(cls):
'''获取命令行参数解析器(如果实现)
:returns: 解析器对象
:rtype: argparse.ArgumentParser
'''
return argparse.ArgumentParser(usage=report_usage)
@classmethod
def parse_args(cls, args_string):
'''通过命令行参数构造对象
:returns: 测试报告
:rtype: cls
'''
return cls()
class JSONTestResultFactory(ITestResultFactory):
'''JSON形式TestResult工厂
'''
def create(self, testcase ):
'''创建TestResult对象
:param testcase: 测试用例
:type testcase: TestCase
:return TestResult
'''
return testresult.JSONResult(testcase)
class JSONTestReport(ITestReport):
'''JSON格式的测试报告
'''
def __init__(self, name="调试测试报告", fd=None ):
'''构造函数
:param name: 报告名
:type name: str
:param fd: 输出流
:type fd: file object
'''
if fd is None:
self._fd = sys.stdout
else:
self._fd = fd
self._results = []
self._logs = []
self._filtered_tests = []
self._load_errors = []
self._testcases = []
self._data = {
"version": "1.0",
"summary": {
"tool": "QTA",
"name": name,
},
"results": self._results,
"logs": self._logs,
"filtered_tests": self._filtered_tests,
"load_errors": self._load_errors,
"loaded_testcases": self._testcases
}
self._testcase_total = 0
self._testcase_passed = 0
def begin_report(self):
'''开始测试执行
'''
self._data["summary"]["start_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def end_report(self):
'''结束测试执行
:param passed: 测试是否通过
:type passed: boolean
'''
self._data["summary"]["testcase_total"] = self._testcase_total
self._data["summary"]["testcase_passed"] = self._testcase_passed
self._data["summary"]["succeed"] = self._testcase_passed == self._testcase_total
self._data["summary"]["end_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
json.dump(self._data, self._fd)
def log_test_result(self, testcase, testresult ):
'''记录一个测试结果
:param testcase: 测试用例
:type testcase: TestCase
:param testresult: 测试结果
:type testresult: TestResult
'''
self._testcase_total += 1
if testresult.passed:
self._testcase_passed += 1
self._results.append(testresult.get_data())
def log_record(self, level, tag, msg, record):
'''增加一个记录
:param level: 日志级别
:param msg: 日志消息
:param tag: 日志标签
:param record: 日志记录信息
:type level: string
:type tag: string
:type msg: string
:type record: dict
'''
self._logs.append({
"level": level,
"tag": tag,
"message": msg,
"record": record
})
def log_loaded_tests(self, loader, testcases):
'''记录加载成功的用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcases: 测试用例列表
:type testcases: list
'''
self._testcases += [
{"name": testcase.test_name}
for testcase in testcases
]
def log_filtered_test(self, loader, testcase, reason):
'''记录一个被过滤的测试用例
:param loader: 用例加载器
:type loader: TestLoader
:param testcase: 测试用例
:type testcase: TestCase
:param reason: 过滤原因
:type reason: str
'''
self._filtered_tests.append({
"name": testcase.test_name,
"reason": reason
})
def log_load_error(self, loader, name, error):
'''记录一个加载失败的用例或用例集
:param loader: 用例加载器
:type loader: TestLoader
:param name: 名称
:type name: str
:param error: 错误信息
:type error: str
'''
self._load_errors.append({
"name": name,
"error": error
})
def get_testresult_factory(self):
'''获取对应的TestResult工厂
:returns ITestResultFactory
'''
return JSONTestResultFactory()
@classmethod
def get_parser(cls):
'''获取命令行参数解析器(如果实现)
:returns: 解析器对象
:rtype: argparse.ArgumentParser
'''
parser = argparse.ArgumentParser(usage=report_usage)
parser.add_argument("--name", help="report title", default="Debug test report")
parser.add_argument("-o", "--output", help="output file path, can be stdout & stderr", default="stdout")
return parser
@classmethod
def parse_args(cls, args_string):
'''通过命令行参数构造对象
:returns: 测试报告
:rtype: cls
'''
args = cls.get_parser().parse_args(args_string)
if args.output == 'stdout':
fd = sys.stdout
elif args.output == 'stderr':
fd = sys.stderr
else:
fd = open(args.output, 'w')
return cls(
name=args.name,
fd=fd)
def __init_report_types():
global report_types
if report_types:
return
report_types.update({
"empty": EmptyTestReport,
"stream": StreamTestReport,
"xml": XMLTestReport,
"json": JSONTestReport,
})
# Register other `ITestReport` implementiations from entry points
for ep in pkg_resources.iter_entry_points(REPORT_ENTRY_POINT):
if ep.name not in report_types:
report_types[ep.name] = ep.load()
__init_report_types()
del __init_report_types
|
[
"xml.dom.minidom.Document",
"json.dump",
"locale.getdefaultlocale",
"argparse.ArgumentParser",
"xml.dom.minidom.parseString",
"testbase.testresult.StreamResult",
"testbase.testresult.JSONResult",
"codecs.open",
"testbase.testresult.get_data",
"os.uname",
"testbase.testresult.EmptyResult",
"os.popen",
"xml.sax.saxutils.escape",
"testbase.testresult.XmlResult",
"socket.gethostname",
"cgi.escape",
"datetime.datetime.now",
"pkg_resources.iter_entry_points"
] |
[((1148, 1173), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (1171, 1173), False, 'import locale\n'), ((36155, 36206), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', (['REPORT_ENTRY_POINT'], {}), '(REPORT_ENTRY_POINT)\n', (36186, 36206), False, 'import pkg_resources\n'), ((8285, 8328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'report_usage'}), '(usage=report_usage)\n', (8308, 8328), False, 'import argparse\n'), ((8924, 8961), 'testbase.testresult.StreamResult', 'testresult.StreamResult', (['self._stream'], {}), '(self._stream)\n', (8947, 8961), False, 'from testbase import testresult\n'), ((10693, 10707), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10705, 10707), False, 'from datetime import datetime\n'), ((10939, 10953), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10951, 10953), False, 'from datetime import datetime\n'), ((14177, 14220), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'report_usage'}), '(usage=report_usage)\n', (14200, 14220), False, 'import argparse\n'), ((25527, 25557), 'testbase.testresult.XmlResult', 'testresult.XmlResult', (['filename'], {}), '(filename)\n', (25547, 25557), False, 'from testbase import testresult\n'), ((25694, 25708), 'xml.dom.minidom.Document', 'dom.Document', ([], {}), '()\n', (25706, 25708), True, 'import xml.dom.minidom as dom\n'), ((26109, 26123), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26121, 26123), False, 'from datetime import datetime\n'), ((26213, 26233), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (26231, 26233), False, 'import socket\n'), ((26427, 26470), 'xml.dom.minidom.parseString', 'dom.parseString', (['(xmltpl % (hostname, osver))'], {}), '(xmltpl % (hostname, osver))\n', (26442, 26470), True, 'import xml.dom.minidom as dom\n'), ((26674, 26688), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26686, 26688), False, 'from datetime import datetime\n'), ((26982, 27006), 'xml.dom.minidom.parseString', 'dom.parseString', (['timexml'], {}), '(timexml)\n', (26997, 27006), True, 'import xml.dom.minidom as dom\n'), ((27758, 27787), 'cgi.escape', 'cgi.escape', (['testcase.test_doc'], {}), '(testcase.test_doc)\n', (27768, 27787), False, 'import cgi\n'), ((27968, 27992), 'xml.dom.minidom.parseString', 'dom.parseString', (['nodestr'], {}), '(nodestr)\n', (27983, 27992), True, 'import xml.dom.minidom as dom\n'), ((29701, 29725), 'xml.dom.minidom.parseString', 'dom.parseString', (['nodestr'], {}), '(nodestr)\n', (29716, 29725), True, 'import xml.dom.minidom as dom\n'), ((30286, 30310), 'xml.dom.minidom.parseString', 'dom.parseString', (['nodestr'], {}), '(nodestr)\n', (30301, 30310), True, 'import xml.dom.minidom as dom\n'), ((30781, 30824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'report_usage'}), '(usage=report_usage)\n', (30804, 30824), False, 'import argparse\n'), ((31270, 31301), 'testbase.testresult.JSONResult', 'testresult.JSONResult', (['testcase'], {}), '(testcase)\n', (31291, 31301), False, 'from testbase import testresult\n'), ((32831, 32862), 'json.dump', 'json.dump', (['self._data', 'self._fd'], {}), '(self._data, self._fd)\n', (32840, 32862), False, 'import json\n'), ((35092, 35135), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': 'report_usage'}), '(usage=report_usage)\n', (35115, 35135), False, 'import argparse\n'), ((6830, 6854), 'testbase.testresult.EmptyResult', 'testresult.EmptyResult', ([], {}), '()\n', (6852, 6854), False, 'from testbase import testresult\n'), ((26377, 26387), 'os.uname', 'os.uname', ([], {}), '()\n', (26385, 26387), False, 'import os\n'), ((27266, 27300), 'codecs.open', 'codecs.open', (['"""TestReport.xml"""', '"""w"""'], {}), "('TestReport.xml', 'w')\n", (27277, 27300), False, 'import codecs\n'), ((27351, 27385), 'codecs.open', 'codecs.open', (['"""TestReport.xsl"""', '"""w"""'], {}), "('TestReport.xsl', 'w')\n", (27362, 27385), False, 'import codecs\n'), ((27439, 27473), 'codecs.open', 'codecs.open', (['"""TestResult.xsl"""', '"""w"""'], {}), "('TestResult.xsl', 'w')\n", (27450, 27473), False, 'import codecs\n'), ((33222, 33243), 'testbase.testresult.get_data', 'testresult.get_data', ([], {}), '()\n', (33241, 33243), False, 'from testbase import testresult\n'), ((28085, 28120), 'xml.sax.saxutils.escape', 'saxutils.escape', (['testcase.test_name'], {}), '(testcase.test_name)\n', (28100, 28120), True, 'import xml.sax.saxutils as saxutils\n'), ((28176, 28207), 'xml.sax.saxutils.escape', 'saxutils.escape', (['testcase.owner'], {}), '(testcase.owner)\n', (28191, 28207), True, 'import xml.sax.saxutils as saxutils\n'), ((32339, 32353), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (32351, 32353), False, 'from datetime import datetime\n'), ((32778, 32792), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (32790, 32792), False, 'from datetime import datetime\n'), ((25378, 25392), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25390, 25392), False, 'from datetime import datetime\n'), ((29589, 29624), 'xml.sax.saxutils.escape', 'saxutils.escape', (['testcase.test_name'], {}), '(testcase.test_name)\n', (29604, 29624), True, 'import xml.sax.saxutils as saxutils\n'), ((29651, 29674), 'xml.sax.saxutils.escape', 'saxutils.escape', (['reason'], {}), '(reason)\n', (29666, 29674), True, 'import xml.sax.saxutils as saxutils\n'), ((30215, 30236), 'xml.sax.saxutils.escape', 'saxutils.escape', (['name'], {}), '(name)\n', (30230, 30236), True, 'import xml.sax.saxutils as saxutils\n'), ((29057, 29080), 'xml.dom.minidom.parseString', 'dom.parseString', (['xmltpl'], {}), '(xmltpl)\n', (29072, 29080), True, 'import xml.dom.minidom as dom\n'), ((26290, 26305), 'os.popen', 'os.popen', (['"""ver"""'], {}), "('ver')\n", (26298, 26305), False, 'import os\n')]
|
import pygame
from config import UPDATE_RATE
from common import render_text_center
def display_msg_affective_disscussion(screen, msg: str, milliseconds: int):
start_ticks = pygame.time.get_ticks()
clock = pygame.time.Clock()
while pygame.time.get_ticks() - start_ticks < milliseconds:
render_text_center(msg, (1250, 90), screen, font_size = 55 , x_offset = 0, y_offset=0)
pygame.event.get()
clock.tick(UPDATE_RATE)
|
[
"common.render_text_center",
"pygame.time.get_ticks",
"pygame.time.Clock",
"pygame.event.get"
] |
[((178, 201), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (199, 201), False, 'import pygame\n'), ((215, 234), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (232, 234), False, 'import pygame\n'), ((307, 392), 'common.render_text_center', 'render_text_center', (['msg', '(1250, 90)', 'screen'], {'font_size': '(55)', 'x_offset': '(0)', 'y_offset': '(0)'}), '(msg, (1250, 90), screen, font_size=55, x_offset=0,\n y_offset=0)\n', (325, 392), False, 'from common import render_text_center\n'), ((402, 420), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (418, 420), False, 'import pygame\n'), ((245, 268), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (266, 268), False, 'import pygame\n')]
|
# Face recognization
import cv2
import os
alg = "haarcascade_frontalface_default.xml"
haar = cv2.CascadeClassifier(alg)
cam = cv2.VideoCapture(0)
path = "dataset"
if not os.path.isdir(path):
os.mkdir(path)
(width,height) = (100,100)
count = 0
while count<100:
count+=1
print(count)
_,img = cam.read()
grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = haar.detectMultiScale(grayImg,1.3,5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)
onlyFace = grayImg[y:y+h,x:x+w]
resizeImg = cv2.resize(onlyFace, (width,height))
cv2.imwrite("%s/%s.jpg"%(path,count), resizeImg)
cv2.imshow("faceDection", img)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
print("Sucessfully colleted dataset")
cam.release()
cv2.destroyAllWindows()
|
[
"os.mkdir",
"os.path.isdir",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((94, 120), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['alg'], {}), '(alg)\n', (115, 120), False, 'import cv2\n'), ((127, 146), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (143, 146), False, 'import cv2\n'), ((761, 784), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (782, 784), False, 'import cv2\n'), ((173, 192), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (186, 192), False, 'import os\n'), ((195, 209), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (203, 209), False, 'import os\n'), ((321, 358), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (333, 358), False, 'import cv2\n'), ((620, 650), 'cv2.imshow', 'cv2.imshow', (['"""faceDection"""', 'img'], {}), "('faceDection', img)\n", (630, 650), False, 'import cv2\n'), ((433, 491), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(3)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n', (446, 491), False, 'import cv2\n'), ((528, 565), 'cv2.resize', 'cv2.resize', (['onlyFace', '(width, height)'], {}), '(onlyFace, (width, height))\n', (538, 565), False, 'import cv2\n'), ((567, 618), 'cv2.imwrite', 'cv2.imwrite', (["('%s/%s.jpg' % (path, count))", 'resizeImg'], {}), "('%s/%s.jpg' % (path, count), resizeImg)\n", (578, 618), False, 'import cv2\n'), ((658, 672), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (669, 672), False, 'import cv2\n')]
|
"""
провести в порядок тесты
"""
import unittest
import os
import shutil
import csv
from csvdb import CSVDB
class TestCSVDB(unittest.TestCase):
"""
тесты для проверки работы объекта CSVDB
"""
def remove_dbdir(self):
"""
удаление БД папки даже она существует
"""
if os.path.exists(self.tst_name_db):
shutil.rmtree(self.tst_name_db)
def create_dbdir(self):
"""
создание БД папки если ёё нет, если есть то удаляется и заново создается
"""
if os.path.exists(self.tst_name_db):
shutil.rmtree(self.tst_name_db)
os.mkdir(self.tst_name_db)
print("File ", self.file1)
# создаем простой файл внутри папки
with open(self.file1, "w") as f:
f.write("Tecт")
def setUp(self) -> None:
self.tst_name_db = "my_test_db"
self.file1 = f"{self.tst_name_db}/file1.csv"
self.tst_table1 = 'table1'
def tearDown(self) -> None:
self.remove_dbdir()
def test_initdb_noexist_dirdb(self):
"""
проверка правильно ли отрабатывается инициализация БД когда папки БД не существует
"""
# инициализация тестового окружения
self.remove_dbdir()
db = CSVDB(name_db=self.tst_name_db)
flag = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db)
self.assertEqual(True, flag)
def test_initdb_exist_dirdb_force(self):
"""
проверка правильно ли отрабатывается инициализация БД когда папки БД существует и нужно перезаписать
"""
# инициализация тестового окружения
self.create_dbdir()
db = CSVDB(name_db=self.tst_name_db, force=True)
flag_dir = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db)
flag_file = os.path.exists(self.file1) and os.path.isfile(self.tst_name_db)
self.assertEqual(True, flag_dir)
self.assertEqual(False, flag_file)
def test_initdb_exist_dirdb_noforce(self):
"""
проверка правильно ли отрабатывается инициализация БД когда папки БД существует и НЕ нужно перезаписать
"""
# инициализация тестового окружения
self.create_dbdir()
db = CSVDB(name_db=self.tst_name_db, force=False)
flag_dir = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db)
flag_file = os.path.exists(self.file1) and os.path.isfile(self.file1)
self.assertEqual(True, flag_dir)
self.assertEqual(True, flag_file)
def test_create_table(self):
"""
создание таблицы
"""
self.remove_dbdir()
db = CSVDB(name_db=self.tst_name_db, force=False)
headers_original = ['NUMBER', 'FIO', 'ROLE']
db.create_table(name_table=self.tst_table1, colums=headers_original)
full_path_table1 = db.full_path(self.tst_table1)
flag_name_table = db.tables[0]
flag_exist_table = os.path.exists(full_path_table1)
print(full_path_table1)
# проверяем что файл присутствует
self.assertEqual(True, flag_exist_table)
# проверяем заголовки файла таблицы
headers = []
with open(full_path_table1) as f:
reader = csv.DictReader(f, delimiter=";")
headers = reader.fieldnames
self.assertEqual(headers, headers_original)
def test_create_table_exist_table(self):
"""
создание таблицы, файл которой уже есть
"""
self.remove_dbdir()
db = CSVDB(name_db=self.tst_name_db, force=False)
headers_original = ['NUMBER', 'FIO', 'ROLE']
flag_noexist = db.create_table(name_table=self.tst_table1, colums=headers_original)
flag_exist = db.create_table(name_table=self.tst_table1, colums=headers_original)
self.assertEqual(True, flag_noexist)
self.assertEqual(False, flag_exist)
def test_insert_data(self):
"""
тест вставки данных
:return:
"""
headers_original = ['NUMBER', 'FIO', 'ROLE']
data_original = {'NUMBER': '1', 'FIO': '<NAME>', 'ROLE': 'Admin'}
self.remove_dbdir()
db = CSVDB(name_db=self.tst_name_db, force=False)
flag_noexist = db.create_table(name_table=self.tst_table1, colums=headers_original)
full_path_table1 = db.full_path(self.tst_table1)
db.insert_data(name_table=self.tst_table1, data=data_original)
result_data = db.getall(name_table=self.tst_table1)
self.assertEqual(result_data[0], data_original)
# проверяем что запись одна
self.assertEqual(1, len(result_data))
# добавляем ещё одну запись
db.insert_data(name_table=self.tst_table1, data=data_original)
result_data = db.getall(name_table=self.tst_table1)
self.assertEqual(2, len(result_data))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.mkdir",
"csvdb.CSVDB",
"os.path.isdir",
"csv.DictReader",
"os.path.exists",
"os.path.isfile",
"shutil.rmtree"
] |
[((323, 355), 'os.path.exists', 'os.path.exists', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (337, 355), False, 'import os\n'), ((546, 578), 'os.path.exists', 'os.path.exists', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (560, 578), False, 'import os\n'), ((633, 659), 'os.mkdir', 'os.mkdir', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (641, 659), False, 'import os\n'), ((1275, 1306), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db'}), '(name_db=self.tst_name_db)\n', (1280, 1306), False, 'from csvdb import CSVDB\n'), ((1699, 1742), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db', 'force': '(True)'}), '(name_db=self.tst_name_db, force=True)\n', (1704, 1742), False, 'from csvdb import CSVDB\n'), ((2277, 2321), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db', 'force': '(False)'}), '(name_db=self.tst_name_db, force=False)\n', (2282, 2321), False, 'from csvdb import CSVDB\n'), ((2698, 2742), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db', 'force': '(False)'}), '(name_db=self.tst_name_db, force=False)\n', (2703, 2742), False, 'from csvdb import CSVDB\n'), ((3000, 3032), 'os.path.exists', 'os.path.exists', (['full_path_table1'], {}), '(full_path_table1)\n', (3014, 3032), False, 'import os\n'), ((3572, 3616), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db', 'force': '(False)'}), '(name_db=self.tst_name_db, force=False)\n', (3577, 3616), False, 'from csvdb import CSVDB\n'), ((4216, 4260), 'csvdb.CSVDB', 'CSVDB', ([], {'name_db': 'self.tst_name_db', 'force': '(False)'}), '(name_db=self.tst_name_db, force=False)\n', (4221, 4260), False, 'from csvdb import CSVDB\n'), ((4938, 4953), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4951, 4953), False, 'import unittest\n'), ((369, 400), 'shutil.rmtree', 'shutil.rmtree', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (382, 400), False, 'import shutil\n'), ((592, 623), 'shutil.rmtree', 'shutil.rmtree', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (605, 623), False, 'import shutil\n'), ((1322, 1354), 'os.path.exists', 'os.path.exists', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (1336, 1354), False, 'import os\n'), ((1359, 1390), 'os.path.isdir', 'os.path.isdir', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (1372, 1390), False, 'import os\n'), ((1762, 1794), 'os.path.exists', 'os.path.exists', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (1776, 1794), False, 'import os\n'), ((1799, 1830), 'os.path.isdir', 'os.path.isdir', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (1812, 1830), False, 'import os\n'), ((1852, 1878), 'os.path.exists', 'os.path.exists', (['self.file1'], {}), '(self.file1)\n', (1866, 1878), False, 'import os\n'), ((1883, 1915), 'os.path.isfile', 'os.path.isfile', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (1897, 1915), False, 'import os\n'), ((2341, 2373), 'os.path.exists', 'os.path.exists', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (2355, 2373), False, 'import os\n'), ((2378, 2409), 'os.path.isdir', 'os.path.isdir', (['self.tst_name_db'], {}), '(self.tst_name_db)\n', (2391, 2409), False, 'import os\n'), ((2431, 2457), 'os.path.exists', 'os.path.exists', (['self.file1'], {}), '(self.file1)\n', (2445, 2457), False, 'import os\n'), ((2462, 2488), 'os.path.isfile', 'os.path.isfile', (['self.file1'], {}), '(self.file1)\n', (2476, 2488), False, 'import os\n'), ((3286, 3318), 'csv.DictReader', 'csv.DictReader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (3300, 3318), False, 'import csv\n')]
|
from functions import units
from functions import gge_dictionary
from functions import unit_cost_dictionary
from functions import gge_cost_dictionary
from functions import fuel_equivalent
from functions import fuel_equivalent_cost
from functions import co2_equivalent
from functions import co2_emissions
import pytest
# ============================================================================
# Tests for units()
# ============================================================================
def test_units():
"""
Should not raise an error if units is correct.
"""
test = units()
assert test == {
'Gasoline': 'gallon',
'Diesel': 'gallon',
'E85': 'gallon',
'Hydrogen': 'kg',
'Electricity': 'kWh'
}
# ============================================================================
# Tests for gge_dictionary()
# ============================================================================
def test_gge_dictionary():
"""
Should not raise an error if gge_dictionary is correctly formatted.
"""
test = gge_dictionary()
assert test == {
'Gasoline': 1.0,
'Diesel': 1.155,
'E85': 0.734,
'Hydrogen': 1.019,
'Electricity': 0.031
}
# ============================================================================
# Tests for unit_cost_dictionary()
# ============================================================================
def test_unit_cost_dictionary():
"""
Should not raise an error if unit_cost_dictionary is correctly formatted.
"""
test = unit_cost_dictionary()
assert test == {
'Gasoline': 2.23,
'Diesel': 2.41,
'E85': 1.71,
'Hydrogen': 13.99,
'Electricity': 0.0426
}
# ============================================================================
# Tests for gge_cost_dictionary()
# ============================================================================
def test_gge_cost_dictionary():
"""
Should not raise an error if gge_cost_dictionary is correctly formatted.
"""
test = gge_cost_dictionary()
assert test == {
'Gasoline': 2.23,
'Diesel': 2.0865800865800868,
'E85': 2.329700272479564,
'Hydrogen': 13.729146221786067,
'Electricity': 1.3741935483870968
}
# ============================================================================
# Tests for fuel_equivalent()
# ============================================================================
def test_fuel_equivalent_1():
"""
Should raise an IndexError if fuel_equivalent is properly set up.
"""
fuel_test = 'Plutonium'
with pytest.raises(IndexError, match='Plutonium not supported.'):
fuel_equivalent(fuel_test)
def test_fuel_equivalent_2():
"""
Should raise a TypeError if fuel_equivalent is properly set up.
"""
Hydrogen = 4
fuel_test = Hydrogen
with pytest.raises(TypeError, match='Please'):
fuel_equivalent(fuel_test)
# ============================================================================
# Tests for fuel_equivalent_cost()
# ============================================================================
def test_fuel_equivalent_cost_1():
"""
Should raise an IndexError if fuel_equivalent_cost is properly set up.
"""
fuel_test = 'Plutonium'
with pytest.raises(IndexError, match='Plutonium not supported.'):
fuel_equivalent(fuel_test)
def test_fuel_equivalent_cost_2():
"""
Should raise a TypeError if fuel_equivalent_cost is properly set up.
"""
Hydrogen = 4
fuel_test = Hydrogen
with pytest.raises(TypeError, match='Please'):
fuel_equivalent(fuel_test)
# ============================================================================
# Tests for co2_equivalent()
# ============================================================================
def test_co2_equivalent():
"""
Should not raise an error if co2_equivalent is properly set up.
"""
test = co2_equivalent()
assert test == {
'Gasoline': 8.89,
'Diesel': 10.16,
'E85': 6.221,
'Hydrogen': 0,
'Electricity': 0
}
# ============================================================================
# Tests for co2_emissions()
# ============================================================================
def test_co2_emissions_1():
"""
Should raise an IndexError if co2_emissions is set up properly.
"""
fuel_test = 'Plutonium'
with pytest.raises(IndexError, match='Plutonium not supported.'):
fuel_equivalent(fuel_test)
def test_co2_emissions_2():
"""
Should raise a TypeError if co2_emissions is set up properly.
"""
Hydrogen = 4
fuel_test = Hydrogen
with pytest.raises(TypeError, match='Please'):
fuel_equivalent(fuel_test)
|
[
"functions.unit_cost_dictionary",
"functions.gge_dictionary",
"functions.fuel_equivalent",
"functions.units",
"pytest.raises",
"functions.co2_equivalent",
"functions.gge_cost_dictionary"
] |
[((598, 605), 'functions.units', 'units', ([], {}), '()\n', (603, 605), False, 'from functions import units\n'), ((1091, 1107), 'functions.gge_dictionary', 'gge_dictionary', ([], {}), '()\n', (1105, 1107), False, 'from functions import gge_dictionary\n'), ((1601, 1623), 'functions.unit_cost_dictionary', 'unit_cost_dictionary', ([], {}), '()\n', (1621, 1623), False, 'from functions import unit_cost_dictionary\n'), ((2114, 2135), 'functions.gge_cost_dictionary', 'gge_cost_dictionary', ([], {}), '()\n', (2133, 2135), False, 'from functions import gge_cost_dictionary\n'), ((4069, 4085), 'functions.co2_equivalent', 'co2_equivalent', ([], {}), '()\n', (4083, 4085), False, 'from functions import co2_equivalent\n'), ((2692, 2751), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Plutonium not supported."""'}), "(IndexError, match='Plutonium not supported.')\n", (2705, 2751), False, 'import pytest\n'), ((2761, 2787), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (2776, 2787), False, 'from functions import fuel_equivalent\n'), ((2959, 2999), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Please"""'}), "(TypeError, match='Please')\n", (2972, 2999), False, 'import pytest\n'), ((3009, 3035), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (3024, 3035), False, 'from functions import fuel_equivalent\n'), ((3399, 3458), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Plutonium not supported."""'}), "(IndexError, match='Plutonium not supported.')\n", (3412, 3458), False, 'import pytest\n'), ((3468, 3494), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (3483, 3494), False, 'from functions import fuel_equivalent\n'), ((3677, 3717), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Please"""'}), "(TypeError, match='Please')\n", (3690, 3717), False, 'import pytest\n'), ((3727, 3753), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (3742, 3753), False, 'from functions import fuel_equivalent\n'), ((4577, 4636), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""Plutonium not supported."""'}), "(IndexError, match='Plutonium not supported.')\n", (4590, 4636), False, 'import pytest\n'), ((4646, 4672), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (4661, 4672), False, 'from functions import fuel_equivalent\n'), ((4841, 4881), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Please"""'}), "(TypeError, match='Please')\n", (4854, 4881), False, 'import pytest\n'), ((4891, 4917), 'functions.fuel_equivalent', 'fuel_equivalent', (['fuel_test'], {}), '(fuel_test)\n', (4906, 4917), False, 'from functions import fuel_equivalent\n')]
|
# SPDX-License-Identifier: MIT
# Copyright © 2020 <NAME>
"""Functions to fix various known issues with exported TFJS models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
from typing import Any, Dict, List, Optional
import tfjs_graph_converter.common as common
def _find_if_has_key(obj: Dict[str, Any], key: str,
of_type: Optional[type] = None) -> List[Any]:
"""
Recursively find all objects with a given key in a dictionary
Args:
obj: Dictionary to search
key: Key to find
of_type: [optional] Type of the referenced item
Returns:
List of all objects that contain an item with the given key
and matching type
"""
def get_children(item: Any) -> List[Any]:
return [val for val in item.values() if isinstance(val, dict)]
found = []
stack = get_children(obj)
while len(stack) > 0:
item = stack.pop()
if key in item and (of_type is None or isinstance(item[key], of_type)):
found.append(item)
stack.extend(get_children(item))
return found
def _convert_string_attrs(node: Dict[str, Any]) -> None:
"""
Deep search string attributes (labelled "s" in GraphDef proto)
and convert ascii code lists to base64-encoded strings if necessary
"""
attr_key = common.TFJS_NODE_ATTR_KEY
str_key = common.TFJS_ATTR_STRING_VALUE_KEY
# some layers (e.g. PReLU) don't contain the `attr` key,
# so test for its presence
attrs: list = []
if attr_key in node:
attrs = _find_if_has_key(node[attr_key], key=str_key, of_type=list)
for attr in attrs:
array = attr[str_key]
# check if conversion is actually necessary
if (len(array) > 0) and isinstance(array, list) \
and isinstance(array[0], int):
string = ''.join(map(chr, array))
binary = string.encode('utf8')
attr[str_key] = base64.encodebytes(binary)
elif len(array) == 0:
attr[str_key] = None
def _fix_dilation_attrs(node: Dict[str, Any]) -> None:
"""
Search dilations-attribute and convert
misaligned dilation rates if necessary see
https://github.com/patlevin/tfjs-to-tf/issues/1
"""
path = ['attr', 'dilations', 'list']
values = node
found = True
for key in path:
if key in values:
values = values[key]
else:
found = False
break
# if dilations are present, they're stored in 'values' now
ints = common.TFJS_ATTR_INT_VALUE_KEY
if found and ints in values and isinstance(values[ints], list):
value = values[ints]
if len(value) != 4:
# must be NCHW-formatted 4D tensor or else TF can't handle it
raise ValueError("Unsupported 'dilations'-attribute in node "
f'{node[common.TFJS_NAME_KEY]}')
# check for [>1,>1,1,1], which is likely a mistranslated [1,>1,>1,1]
if int(value[0], 10) > 1:
values[ints] = ['1', value[0], value[1], '1']
def fix_node_attributes(message_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Fix various known issues found "in the wild":
• Node attributes in deserialised JSON may contain strings as lists of
ascii codes when the TF GraphDef proto expects base64 encoded strings
• 'dilation' attributes may be misaligned in a way unsupported by TF
Further fixes will be added as issues are reported.
Args:
message_dict: Graph model formatted as parsed JSON dictionary
Returns:
Updated message dictionary with fixes applied if necessary
"""
if common.TFJS_NODE_KEY in message_dict:
nodes = message_dict[common.TFJS_NODE_KEY]
for node in nodes:
_convert_string_attrs(node)
_fix_dilation_attrs(node)
return message_dict
|
[
"base64.encodebytes"
] |
[((2108, 2134), 'base64.encodebytes', 'base64.encodebytes', (['binary'], {}), '(binary)\n', (2126, 2134), False, 'import base64\n')]
|
import os
import sys
sys.path.insert(0, os.path.abspath('../../.'))
from tqdm import tqdm
import torch
from src.model.SparseNet import SparseNet
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from src.model.ImageDataset import NatPatchDataset
from src.utils.cmd_line import parse_args
from src.scripts.plotting import plot_rf
# save to tensorboard
board = SummaryWriter("../../runs/sparse-net")
arg = parse_args()
# if use cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# create net
sparse_net = SparseNet(arg.n_neuron, arg.size, R_lr=arg.r_learning_rate, lmda=arg.reg, device=device)
# load data
dataloader = DataLoader(NatPatchDataset(arg.batch_size, arg.size, arg.size), batch_size=250)
# train
optim = torch.optim.SGD([{'params': sparse_net.U.weight, "lr": arg.learning_rate}])
for e in range(arg.epoch):
running_loss = 0
c = 0
for img_batch in tqdm(dataloader, desc='training', total=len(dataloader)):
img_batch = img_batch.reshape(img_batch.shape[0], -1).to(device)
# update
pred = sparse_net(img_batch)
loss = ((img_batch - pred) ** 2).sum()
running_loss += loss.item()
loss.backward()
# update U
optim.step()
# zero grad
sparse_net.zero_grad()
# norm
sparse_net.normalize_weights()
c += 1
board.add_scalar('Loss', running_loss / c, e * len(dataloader) + c)
if e % 5 == 4:
# plotting
fig = plot_rf(sparse_net.U.weight.T.reshape(arg.n_neuron, arg.size, arg.size).cpu().data.numpy(), arg.n_neuron, arg.size)
board.add_figure('RF', fig, global_step=e * len(dataloader) + c)
if e % 10 == 9:
# save checkpoint
torch.save(sparse_net, f"../../trained_models/ckpt-{e+1}.pth")
torch.save(sparse_net, f"../../trained_models/ckpt-{e+1}.pth")
|
[
"os.path.abspath",
"src.model.ImageDataset.NatPatchDataset",
"src.model.SparseNet.SparseNet",
"src.utils.cmd_line.parse_args",
"torch.save",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.optim.SGD"
] |
[((401, 439), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""../../runs/sparse-net"""'], {}), "('../../runs/sparse-net')\n", (414, 439), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((446, 458), 'src.utils.cmd_line.parse_args', 'parse_args', ([], {}), '()\n', (456, 458), False, 'from src.utils.cmd_line import parse_args\n'), ((569, 661), 'src.model.SparseNet.SparseNet', 'SparseNet', (['arg.n_neuron', 'arg.size'], {'R_lr': 'arg.r_learning_rate', 'lmda': 'arg.reg', 'device': 'device'}), '(arg.n_neuron, arg.size, R_lr=arg.r_learning_rate, lmda=arg.reg,\n device=device)\n', (578, 661), False, 'from src.model.SparseNet import SparseNet\n'), ((779, 854), 'torch.optim.SGD', 'torch.optim.SGD', (["[{'params': sparse_net.U.weight, 'lr': arg.learning_rate}]"], {}), "([{'params': sparse_net.U.weight, 'lr': arg.learning_rate}])\n", (794, 854), False, 'import torch\n'), ((1816, 1880), 'torch.save', 'torch.save', (['sparse_net', 'f"""../../trained_models/ckpt-{e + 1}.pth"""'], {}), "(sparse_net, f'../../trained_models/ckpt-{e + 1}.pth')\n", (1826, 1880), False, 'import torch\n'), ((40, 66), 'os.path.abspath', 'os.path.abspath', (['"""../../."""'], {}), "('../../.')\n", (55, 66), False, 'import os\n'), ((694, 745), 'src.model.ImageDataset.NatPatchDataset', 'NatPatchDataset', (['arg.batch_size', 'arg.size', 'arg.size'], {}), '(arg.batch_size, arg.size, arg.size)\n', (709, 745), False, 'from src.model.ImageDataset import NatPatchDataset\n'), ((505, 530), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (528, 530), False, 'import torch\n'), ((1753, 1817), 'torch.save', 'torch.save', (['sparse_net', 'f"""../../trained_models/ckpt-{e + 1}.pth"""'], {}), "(sparse_net, f'../../trained_models/ckpt-{e + 1}.pth')\n", (1763, 1817), False, 'import torch\n')]
|
"""
slixmpp: The Slick XMPP Library
Copyright (C) 2016 <NAME>
This file is part of slixmpp.
See the file LICENSE for copying permission.
"""
from slixmpp.plugins.base import register_plugin
from slixmpp.plugins.xep_0333.stanza import Markable, Received, Displayed, Acknowledged
from slixmpp.plugins.xep_0333.hints import XEP_0333
register_plugin(XEP_0333)
|
[
"slixmpp.plugins.base.register_plugin"
] |
[((350, 375), 'slixmpp.plugins.base.register_plugin', 'register_plugin', (['XEP_0333'], {}), '(XEP_0333)\n', (365, 375), False, 'from slixmpp.plugins.base import register_plugin\n')]
|
"""The PoolSense integration."""
import asyncio
from datetime import timedelta
import logging
import async_timeout
from poolsense import PoolSense
from poolsense.exceptions import PoolSenseError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, update_coordinator
from homeassistant.helpers.update_coordinator import UpdateFailed
from .const import DOMAIN
PLATFORMS = ["sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the PoolSense component."""
# Make sure coordinator is initialized.
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up PoolSense from a config entry."""
poolsense = PoolSense()
auth_valid = await poolsense.test_poolsense_credentials(
aiohttp_client.async_get_clientsession(hass),
entry.data[CONF_EMAIL],
entry.data[CONF_PASSWORD],
)
if not auth_valid:
_LOGGER.error("Invalid authentication")
return False
coordinator = await get_coordinator(hass, entry)
await hass.data[DOMAIN][entry.entry_id].async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def get_coordinator(hass, entry):
"""Get the data update coordinator."""
async def async_get_data():
_LOGGER.info("Run query to server")
poolsense = PoolSense()
return_data = {}
with async_timeout.timeout(10):
try:
return_data = await poolsense.get_poolsense_data(
aiohttp_client.async_get_clientsession(hass),
entry.data[CONF_EMAIL],
entry.data[CONF_PASSWORD],
)
except (PoolSenseError) as error:
raise UpdateFailed(error)
return return_data
return update_coordinator.DataUpdateCoordinator(
hass,
logging.getLogger(__name__),
name=DOMAIN,
update_method=async_get_data,
update_interval=timedelta(hours=1),
)
|
[
"homeassistant.helpers.update_coordinator.UpdateFailed",
"async_timeout.timeout",
"datetime.timedelta",
"poolsense.PoolSense",
"logging.getLogger",
"homeassistant.helpers.aiohttp_client.async_get_clientsession"
] |
[((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((970, 981), 'poolsense.PoolSense', 'PoolSense', ([], {}), '()\n', (979, 981), False, 'from poolsense import PoolSense\n'), ((2273, 2284), 'poolsense.PoolSense', 'PoolSense', ([], {}), '()\n', (2282, 2284), False, 'from poolsense import PoolSense\n'), ((2800, 2827), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2817, 2827), False, 'import logging\n'), ((1051, 1095), 'homeassistant.helpers.aiohttp_client.async_get_clientsession', 'aiohttp_client.async_get_clientsession', (['hass'], {}), '(hass)\n', (1089, 1095), False, 'from homeassistant.helpers import aiohttp_client, update_coordinator\n'), ((2323, 2348), 'async_timeout.timeout', 'async_timeout.timeout', (['(10)'], {}), '(10)\n', (2344, 2348), False, 'import async_timeout\n'), ((2912, 2930), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2921, 2930), False, 'from datetime import timedelta\n'), ((2676, 2695), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['error'], {}), '(error)\n', (2688, 2695), False, 'from homeassistant.helpers.update_coordinator import UpdateFailed\n'), ((2453, 2497), 'homeassistant.helpers.aiohttp_client.async_get_clientsession', 'aiohttp_client.async_get_clientsession', (['hass'], {}), '(hass)\n', (2491, 2497), False, 'from homeassistant.helpers import aiohttp_client, update_coordinator\n')]
|
import logging
import os
import time
import pytest
CURL = "/usr/bin/curl -X POST http://localhost:{} -d hello_my_plugins"
WAITSTATUS = 0.1
def get_factory_args(port):
# TODO: Find better way of getting an interpreter in the current env
interpreter = os.path.abspath("./env-plugin/bin/python")
args = [
interpreter,
"-m",
"restapi_echo_server",
"--host",
"0.0.0.0",
"--port",
str(port),
]
return args
@pytest.fixture(name="echoserver")
def fixture_echoserver(process_factory):
"""
Custom fixture starts an echoserver on port 8090
"""
# TODO: Find better way of getting an interpreter in the current env
interpreter = os.path.abspath("./env-plugin/bin/python")
process = process_factory(
[
interpreter,
"-m",
"restapi_echo_server",
"--host",
"0.0.0.0",
"--port",
"8090",
],
)
process.set_name("echoserver_")
yield process
logging.info("Killing echoserver")
process.kill()
@pytest.fixture(name="echoserver_2")
def fixture_echoserver_2(process_factory):
"""
Custom fixture starts an echoserver on port 8092
"""
# TODO: Find better way of getting an interpreter in the current env
interpreter = os.path.abspath("./env-plugin/bin/python")
process = process_factory(
[
interpreter,
"-m",
"restapi_echo_server",
"--host",
"0.0.0.0",
"--port",
"8092",
],
)
process.set_name("ecoserver_2_")
yield process
logging.info("Killing echoserver")
process.kill()
@pytest.fixture(name="asserts_echoserver")
def fixture_asserts_echoserver():
yield
logging.info("Asserts Echoserver")
@pytest.fixture(name="cleanup_echoserver")
def fixture_cleanup_echoserver():
yield
logging.info("Cleanup Echoserver")
def test_use_case_echo(echoserver):
echoserver.run_bg()
time.sleep(1)
echoserver.kill()
time.sleep(WAITSTATUS)
# If this fails, there is maybe still one running
assert echoserver.get_status() == "NotExisting"
def test_use_case_echo_with_additional_cleanup(
echoserver, asserts_echoserver, cleanup_echoserver
):
_ = asserts_echoserver # for now just use them otherwise pylint will complain
_ = cleanup_echoserver
# Does not work right
echoserver.run_bg()
time.sleep(0.1)
def test_use_case_echo_and_curl(process_factory, process):
# TODO: Find better way of getting an interpreter in the current env
interpreter = os.path.abspath("./env-plugin/bin/python")
server = process_factory(
[
interpreter,
"-m",
"restapi_echo_server",
"--host",
"0.0.0.0",
"--port",
"8080",
]
)
server.run_bg()
# give the server 100ms to start in the background
time.sleep(0.1)
process.set_command(
CURL.format(8080).split(),
)
assert process.run() == 0
def test_use_case_echo_and_curl_from_factory(process_factory):
# TODO: Find better way of getting an interpreter in the current env
interpreter = os.path.abspath("./env-plugin/bin/python")
server = process_factory(
[
interpreter,
"-m",
"restapi_echo_server",
"--host",
"0.0.0.0",
"--port",
"8080",
],
"server_",
)
server.run_bg()
time.sleep(WAITSTATUS)
assert server.get_status() == "Running" # make sure it still runs
# give the server 100ms to start in the background
time.sleep(0.1)
client = process_factory(
CURL.format(8080).split(),
"client_",
)
client.run_bg()
time.sleep(WAITSTATUS)
assert client.get_status() == 0
server.kill()
time.sleep(WAITSTATUS)
assert server.get_status() == "NotExisting"
# For weird reasons the echoserver logs to stderr
assert server.get_stdout() == ""
assert "hello_my_plugins" in server.get_stderr()
def test_use_case_echoserver_fixture_and_curl(process_factory, echoserver):
echoserver.run_bg()
time.sleep(WAITSTATUS) # give the server some time to start
assert echoserver.get_status() == "Running" # make sure it still runs
# give the server 100ms to start in the background
time.sleep(0.1)
client = process_factory(
CURL.format(8090).split(),
"client_",
)
client.run_bg()
time.sleep(WAITSTATUS)
assert client.get_status() == 0
echoserver.kill()
time.sleep(WAITSTATUS)
assert echoserver.get_status() == "NotExisting"
assert (
echoserver.get_stdout() == ""
) # For weird reasons the echoserver logs to stderr
assert "hello_my_plugins" in echoserver.get_stderr()
def test_use_case_echoserver_1_and_2(process_factory, echoserver, echoserver_2):
echoserver_1 = echoserver
echoserver_1.run_bg()
echoserver_2.run_bg()
time.sleep(0.1)
assert echoserver_1.get_status() == "Running"
assert echoserver_2.get_status() == "Running"
time.sleep(0.1)
client_a = process_factory(
CURL.format(8090).split(),
"client_a_",
)
client_b = process_factory(
CURL.format(8092).split(),
"client_b_",
)
client_a.run_bg()
client_b.run_bg()
time.sleep(0.1)
assert client_a.get_status() == 0
assert client_b.get_status() == 0
echoserver_1.kill()
echoserver_2.kill()
time.sleep(0.1)
assert echoserver_1.get_status() == "NotExisting"
assert echoserver_2.get_status() == "NotExisting"
assert "hello_my_plugins" in echoserver_1.get_stderr()
assert "hello_my_plugins" in echoserver_2.get_stderr()
def test_use_case_echo_and_curl_from_factory_n(process_factory):
amount = 10
servers = []
clients = []
for i in range(amount):
server = process_factory(get_factory_args(8080 + i), f"server_{i}_")
server.run_bg()
servers.append(server)
time.sleep(0.1)
logging.info("Polling server status")
for server in servers:
status = server.get_status()
if status != "Running":
logging.error("Something went wrong here is stdout")
logging.error(server.get_stdout())
logging.error("Something went wrong here is stderr")
logging.error(server.get_stderr())
assert status == "Running"
time.sleep(0.5)
logging.info("Starting clients")
for i in range(amount):
client = process_factory(
CURL.format(8080 + i).split(),
f"client_{i}_",
)
client.run_bg()
time.sleep(0.5)
logging.info("Polling clients")
# We expect, that all clients exited with zero
for client in clients:
assert client.get_status() == 0
clients.append(client)
for server in servers:
server.kill()
time.sleep(0.1)
for server in servers:
assert server.get_status() == "NotExisting"
for server in servers:
# For weird reasons the echoserver logs to stderr
assert server.get_stdout() == ""
assert "hello_my_plugins" in server.get_stderr()
for client in clients:
assert "method" in client.get_stdout()
assert "Total" in client.get_stderr()
|
[
"os.path.abspath",
"logging.error",
"pytest.fixture",
"time.sleep",
"logging.info"
] |
[((484, 517), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""echoserver"""'}), "(name='echoserver')\n", (498, 517), False, 'import pytest\n'), ((1100, 1135), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""echoserver_2"""'}), "(name='echoserver_2')\n", (1114, 1135), False, 'import pytest\n'), ((1721, 1762), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""asserts_echoserver"""'}), "(name='asserts_echoserver')\n", (1735, 1762), False, 'import pytest\n'), ((1849, 1890), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""cleanup_echoserver"""'}), "(name='cleanup_echoserver')\n", (1863, 1890), False, 'import pytest\n'), ((262, 304), 'os.path.abspath', 'os.path.abspath', (['"""./env-plugin/bin/python"""'], {}), "('./env-plugin/bin/python')\n", (277, 304), False, 'import os\n'), ((719, 761), 'os.path.abspath', 'os.path.abspath', (['"""./env-plugin/bin/python"""'], {}), "('./env-plugin/bin/python')\n", (734, 761), False, 'import os\n'), ((1043, 1077), 'logging.info', 'logging.info', (['"""Killing echoserver"""'], {}), "('Killing echoserver')\n", (1055, 1077), False, 'import logging\n'), ((1339, 1381), 'os.path.abspath', 'os.path.abspath', (['"""./env-plugin/bin/python"""'], {}), "('./env-plugin/bin/python')\n", (1354, 1381), False, 'import os\n'), ((1664, 1698), 'logging.info', 'logging.info', (['"""Killing echoserver"""'], {}), "('Killing echoserver')\n", (1676, 1698), False, 'import logging\n'), ((1811, 1845), 'logging.info', 'logging.info', (['"""Asserts Echoserver"""'], {}), "('Asserts Echoserver')\n", (1823, 1845), False, 'import logging\n'), ((1939, 1973), 'logging.info', 'logging.info', (['"""Cleanup Echoserver"""'], {}), "('Cleanup Echoserver')\n", (1951, 1973), False, 'import logging\n'), ((2040, 2053), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2050, 2053), False, 'import time\n'), ((2080, 2102), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (2090, 2102), False, 'import time\n'), ((2482, 2497), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2492, 2497), False, 'import time\n'), ((2650, 2692), 'os.path.abspath', 'os.path.abspath', (['"""./env-plugin/bin/python"""'], {}), "('./env-plugin/bin/python')\n", (2665, 2692), False, 'import os\n'), ((2993, 3008), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3003, 3008), False, 'import time\n'), ((3261, 3303), 'os.path.abspath', 'os.path.abspath', (['"""./env-plugin/bin/python"""'], {}), "('./env-plugin/bin/python')\n", (3276, 3303), False, 'import os\n'), ((3569, 3591), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (3579, 3591), False, 'import time\n'), ((3722, 3737), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3732, 3737), False, 'import time\n'), ((3852, 3874), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (3862, 3874), False, 'import time\n'), ((3933, 3955), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (3943, 3955), False, 'import time\n'), ((4255, 4277), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (4265, 4277), False, 'import time\n'), ((4450, 4465), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4460, 4465), False, 'import time\n'), ((4580, 4602), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (4590, 4602), False, 'import time\n'), ((4665, 4687), 'time.sleep', 'time.sleep', (['WAITSTATUS'], {}), '(WAITSTATUS)\n', (4675, 4687), False, 'import time\n'), ((5076, 5091), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5086, 5091), False, 'import time\n'), ((5197, 5212), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5207, 5212), False, 'import time\n'), ((5452, 5467), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5462, 5467), False, 'import time\n'), ((5597, 5612), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5607, 5612), False, 'import time\n'), ((6124, 6139), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6134, 6139), False, 'import time\n'), ((6145, 6182), 'logging.info', 'logging.info', (['"""Polling server status"""'], {}), "('Polling server status')\n", (6157, 6182), False, 'import logging\n'), ((6547, 6562), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6557, 6562), False, 'import time\n'), ((6567, 6599), 'logging.info', 'logging.info', (['"""Starting clients"""'], {}), "('Starting clients')\n", (6579, 6599), False, 'import logging\n'), ((6773, 6788), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6783, 6788), False, 'import time\n'), ((6793, 6824), 'logging.info', 'logging.info', (['"""Polling clients"""'], {}), "('Polling clients')\n", (6805, 6824), False, 'import logging\n'), ((7030, 7045), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7040, 7045), False, 'import time\n'), ((6291, 6343), 'logging.error', 'logging.error', (['"""Something went wrong here is stdout"""'], {}), "('Something went wrong here is stdout')\n", (6304, 6343), False, 'import logging\n'), ((6403, 6455), 'logging.error', 'logging.error', (['"""Something went wrong here is stderr"""'], {}), "('Something went wrong here is stderr')\n", (6416, 6455), False, 'import logging\n')]
|
import io
import os
import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from dragnet import Extractor
from dragnet.blocks import TagCountNoCSSReadabilityBlockifier
from dragnet.util import get_and_union_features
from dragnet.compat import str_cast
with io.open(os.path.join('test', 'datafiles', 'models_testing.html'), 'r') as f:
big_html_doc = f.read()
class TestExtractor(unittest.TestCase):
def test_extractor(self):
prob_threshold = 0.5
blockifier = TagCountNoCSSReadabilityBlockifier()
features = get_and_union_features(['weninger', 'kohlschuetter', 'readability'])
# initialize model from pre-fit attributes
model_attrs = {
'C': 1.0,
'class_weight': None,
'classes_': [0, 1],
'coef_': [[0.00501458328421719, -0.0006331822163374379, -0.6699789320373452, 0.026069227973339763, -1.5552477377277252, 0.02980432745983307, -0.965575689884716, 0.019509367890934326, -0.35692924115362307]],
'dual': False,
'fit_intercept': True,
'intercept_': [-1.2071425754440765],
'intercept_scaling': 1,
'max_iter': 100,
'multi_class': 'ovr',
'n_iter_': [12],
'n_jobs': 1,
'penalty': 'l2',
'solver': 'liblinear',
'tol': 0.0001,
'warm_start': False}
model = LogisticRegression()
for k, v in model_attrs.items():
if isinstance(v, list):
setattr(model, k, np.array(v))
else:
setattr(model, k, v)
# extract content via the extractor class
extractor = Extractor(blockifier, features=features, model=model,
to_extract='content', prob_threshold=prob_threshold)
extractor_content = extractor.extract(big_html_doc)
# extract content via individual components
blocks = blockifier.blockify(big_html_doc)
features_mat = features.transform(blocks)
positive_idx = list(model.classes_).index(1)
preds = (model.predict_proba(features_mat) > prob_threshold)[:, positive_idx].astype(int)
components_content = '\n'.join(str_cast(blocks[ind].text) for ind in np.flatnonzero(preds))
self.assertIsNotNone(extractor_content)
self.assertEqual(extractor_content, components_content)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"dragnet.Extractor",
"numpy.flatnonzero",
"dragnet.util.get_and_union_features",
"sklearn.linear_model.LogisticRegression",
"dragnet.compat.str_cast",
"numpy.array",
"dragnet.blocks.TagCountNoCSSReadabilityBlockifier",
"os.path.join"
] |
[((2451, 2466), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2464, 2466), False, 'import unittest\n'), ((300, 356), 'os.path.join', 'os.path.join', (['"""test"""', '"""datafiles"""', '"""models_testing.html"""'], {}), "('test', 'datafiles', 'models_testing.html')\n", (312, 356), False, 'import os\n'), ((521, 557), 'dragnet.blocks.TagCountNoCSSReadabilityBlockifier', 'TagCountNoCSSReadabilityBlockifier', ([], {}), '()\n', (555, 557), False, 'from dragnet.blocks import TagCountNoCSSReadabilityBlockifier\n'), ((577, 645), 'dragnet.util.get_and_union_features', 'get_and_union_features', (["['weninger', 'kohlschuetter', 'readability']"], {}), "(['weninger', 'kohlschuetter', 'readability'])\n", (599, 645), False, 'from dragnet.util import get_and_union_features\n'), ((1432, 1452), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1450, 1452), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1703, 1813), 'dragnet.Extractor', 'Extractor', (['blockifier'], {'features': 'features', 'model': 'model', 'to_extract': '"""content"""', 'prob_threshold': 'prob_threshold'}), "(blockifier, features=features, model=model, to_extract='content',\n prob_threshold=prob_threshold)\n", (1712, 1813), False, 'from dragnet import Extractor\n'), ((2244, 2270), 'dragnet.compat.str_cast', 'str_cast', (['blocks[ind].text'], {}), '(blocks[ind].text)\n', (2252, 2270), False, 'from dragnet.compat import str_cast\n'), ((1564, 1575), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1572, 1575), True, 'import numpy as np\n'), ((2282, 2303), 'numpy.flatnonzero', 'np.flatnonzero', (['preds'], {}), '(preds)\n', (2296, 2303), True, 'import numpy as np\n')]
|
"""Initial metrics
Revision ID: 6f9266e7a5fb
Revises: 51415576d3e9
Create Date: 2017-12-12 10:38:27.166562
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import MetricSetType, MetricsKey
# revision identifiers, used by Alembic.
revision = "6f9266e7a5fb"
down_revision = "51415576d3e9"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"metric_set",
sa.Column("metric_set_id", sa.String(length=50), nullable=False),
sa.Column("metric_set_type", model.utils.Enum(MetricSetType), nullable=False),
sa.Column("last_modified", model.utils.UTCDateTime(), nullable=False),
sa.PrimaryKeyConstraint("metric_set_id"),
schema="metrics",
)
op.create_table(
"aggregate_metrics",
sa.Column("metric_set_id", sa.String(length=50), nullable=False),
sa.Column("metrics_key", model.utils.Enum(MetricsKey), nullable=False),
sa.Column("value", sa.String(length=50), nullable=False),
sa.Column("count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(["metric_set_id"], ["metrics.metric_set.metric_set_id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("metric_set_id", "metrics_key", "value"),
schema="metrics",
)
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("aggregate_metrics", schema="metrics")
op.drop_table("metric_set", schema="metrics")
# ### end Alembic commands ###
|
[
"alembic.op.drop_table",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.ForeignKeyConstraint",
"sqlalchemy.String",
"sqlalchemy.Integer"
] |
[((1930, 1982), 'alembic.op.drop_table', 'op.drop_table', (['"""aggregate_metrics"""'], {'schema': '"""metrics"""'}), "('aggregate_metrics', schema='metrics')\n", (1943, 1982), False, 'from alembic import op\n'), ((1987, 2032), 'alembic.op.drop_table', 'op.drop_table', (['"""metric_set"""'], {'schema': '"""metrics"""'}), "('metric_set', schema='metrics')\n", (2000, 2032), False, 'from alembic import op\n'), ((1180, 1220), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""metric_set_id"""'], {}), "('metric_set_id')\n", (1203, 1220), True, 'import sqlalchemy as sa\n'), ((1590, 1695), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['metric_set_id']", "['metrics.metric_set.metric_set_id']"], {'ondelete': '"""CASCADE"""'}), "(['metric_set_id'], [\n 'metrics.metric_set.metric_set_id'], ondelete='CASCADE')\n", (1613, 1695), True, 'import sqlalchemy as sa\n'), ((1700, 1764), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""metric_set_id"""', '"""metrics_key"""', '"""value"""'], {}), "('metric_set_id', 'metrics_key', 'value')\n", (1723, 1764), True, 'import sqlalchemy as sa\n'), ((967, 987), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (976, 987), True, 'import sqlalchemy as sa\n'), ((1339, 1359), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (1348, 1359), True, 'import sqlalchemy as sa\n'), ((1485, 1505), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (1494, 1505), True, 'import sqlalchemy as sa\n'), ((1551, 1563), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1561, 1563), True, 'import sqlalchemy as sa\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Pyle makes it easy to use Python as a replacement for command line tools such as `sed` or `perl`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from future.utils import string_types
import argparse
import io
import re
import sh
import sys
import traceback
__version__ = "0.4.1"
STANDARD_MODULES = {
're': re,
'sh': sh
}
def truncate_ellipsis(line, length=30):
"""Truncate a line to the specified length followed by ``...`` unless its shorter than length already."""
return line if len(line) < length else line[:length - 3] + "..."
def pyle_evaluate(expressions=None, modules=(), inplace=False, files=None, print_traceback=False):
"""The main method of pyle."""
eval_globals = {}
eval_globals.update(STANDARD_MODULES)
for module_arg in modules or ():
for module in module_arg.strip().split(","):
module = module.strip()
if module:
eval_globals[module] = __import__(module)
if not expressions:
# Default 'do nothing' program
expressions = ['line']
encoding = sys.getdefaultencoding()
files = files or ['-']
eval_locals = {}
for file in files:
if file == '-':
file = sys.stdin
out_buf = sys.stdout if not inplace else io.StringIO()
out_line = None
with (io.open(file, 'r', encoding=encoding) if not hasattr(file, 'read') else file) as in_file:
for num, line in enumerate(in_file.readlines()):
was_whole_line = False
if line[-1] == '\n':
was_whole_line = True
line = line[:-1]
expr = ""
try:
for expr in expressions:
words = [word.strip()
for word in re.split(r'\s+', line)
if word]
eval_locals.update({
'line': line,
'words': words,
'filename': in_file.name,
'num': num
})
out_line = eval(expr, eval_globals, eval_locals)
if out_line is None:
continue
# If the result is something list-like or iterable,
# output each item space separated.
if not isinstance(out_line, string_types):
try:
out_line = u' '.join(str(part) for part in out_line)
except:
# Guess it wasn't a list after all.
out_line = str(out_line)
line = out_line
except Exception as e:
sys.stdout.flush()
sys.stderr.write("At %s:%d ('%s'): `%s`: %s\n" % (
in_file.name, num, truncate_ellipsis(line), expr, e))
if print_traceback:
traceback.print_exc(None, sys.stderr)
else:
if out_line is None:
continue
out_line = out_line or u''
out_buf.write(out_line)
if was_whole_line:
out_buf.write('\n')
if inplace:
with io.open(file, 'w', encoding=encoding) as out_file:
out_file.write(out_buf.getvalue())
out_buf.close()
def pyle(argv=None):
"""Execute pyle with the specified arguments, or sys.argv if no arguments specified."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-m", "--modules", dest="modules", action='append',
help="import MODULE before evaluation. May be specified more than once.")
parser.add_argument("-i", "--inplace", dest="inplace", action='store_true', default=False,
help="edit files in place. When used with file name arguments, the files will be replaced by the output of the evaluation")
parser.add_argument("-e", "--expression", action="append",
dest="expressions", help="an expression to evaluate for each line")
parser.add_argument('files', nargs='*',
help="files to read as input. If used with --inplace, the files will be replaced with the output")
parser.add_argument("--traceback", action="store_true", default=False,
help="print a traceback on stderr when an expression fails for a line")
args = parser.parse_args() if not argv else parser.parse_args(argv)
pyle_evaluate(args.expressions, args.modules, args.inplace, args.files, args.traceback)
if __name__ == '__main__':
pyle()
|
[
"io.StringIO",
"traceback.print_exc",
"re.split",
"argparse.ArgumentParser",
"future.standard_library.install_aliases",
"sys.getdefaultencoding",
"sys.stdout.flush",
"io.open"
] |
[((339, 373), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (371, 373), False, 'from future import standard_library\n'), ((1305, 1329), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (1327, 1329), False, 'import sys\n'), ((3943, 3987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (3966, 3987), False, 'import argparse\n'), ((1505, 1518), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1516, 1518), False, 'import io\n'), ((1558, 1595), 'io.open', 'io.open', (['file', '"""r"""'], {'encoding': 'encoding'}), "(file, 'r', encoding=encoding)\n", (1565, 1595), False, 'import io\n'), ((3684, 3721), 'io.open', 'io.open', (['file', '"""w"""'], {'encoding': 'encoding'}), "(file, 'w', encoding=encoding)\n", (3691, 3721), False, 'import io\n'), ((3105, 3123), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3121, 3123), False, 'import sys\n'), ((3337, 3374), 'traceback.print_exc', 'traceback.print_exc', (['None', 'sys.stderr'], {}), '(None, sys.stderr)\n', (3356, 3374), False, 'import traceback\n'), ((2049, 2071), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (2057, 2071), False, 'import re\n')]
|
import hashlib
ENCODED = 'sha1$bh9ul$8e808fcea5418aa971311ea1598df65627ea3b98'
_, SALT, PASSWORD = ENCODED.split('$')
def check(possibility):
return hashlib.sha1(SALT + possibility).hexdigest() == PASSWORD
f = open('solutions/official/CSW12.txt', 'rb')
for row in f:
row = row.rstrip()
if not row:
continue
if ' ' in row:
word, _ = row.split(' ', 1)
else:
word = row
if check(word.lower()):
print(u'DECODED {0}'.format(word))
break
else:
print(u'Solution not found')
f.close()
|
[
"hashlib.sha1"
] |
[((152, 184), 'hashlib.sha1', 'hashlib.sha1', (['(SALT + possibility)'], {}), '(SALT + possibility)\n', (164, 184), False, 'import hashlib\n')]
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved."""
from .feature import Feature
from scipy.stats import entropy
import numpy as np
class KLDivergence(Feature):
r"""
A feature that computes the KL divergence between the
logits of each data points given by a classifier mean logits
for each label and the mean of these logits for each label
----------
mean_logits : array-like of shape (n_classes, n_classes) is the mean of the logits of datapoints
having the same label. First dimension should be labels, second should be the mean logit for
this label
Attributes
----------
mean_logits: ' '
"""
def __init__(self, mean_logits):
self.mean_logits = mean_logits
def augment(self, logits):
"""
Performs the data augmentation.
Computes the KL divergence between the parameter logits and
the attribute mean_logits
:param
logits: array-like of shape (n_classes, n_samples)
:return:
C : array-like of shape (n_classes, n_samples)
"""
return np.array([entropy(logits,
np.repeat(mean_logit[..., np.newaxis], logits.shape[1], axis=1), base=2)
for mean_logit in self.mean_logits])
|
[
"numpy.repeat"
] |
[((1168, 1231), 'numpy.repeat', 'np.repeat', (['mean_logit[..., np.newaxis]', 'logits.shape[1]'], {'axis': '(1)'}), '(mean_logit[..., np.newaxis], logits.shape[1], axis=1)\n', (1177, 1231), True, 'import numpy as np\n')]
|
import Anime_Scraper
import Color
import warnings
import ssl
import argparse
import requests
import shutil
import os
import re
import sys
from platform import system
from threading import Thread
from queue import Queue
from art import text2art
directory = ""
threads = 1
token = None
titles = False
args = None
gui = None
class Worker(Thread) :
def __init__(self, tasks) :
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.start()
def run(self) :
global gui
while True :
func, arg, kargs = self.tasks.get()
try :
func(*arg, **kargs)
except Exception as ex :
# print(ex)
Color.printer("ERROR", ex, gui)
finally :
self.tasks.task_done()
class ThreadPool :
def __init__(self, num_threads) :
self.tasks = Queue(num_threads)
for _ in range(num_threads) :
Worker(self.tasks)
def add_task(self, func, *arg, **kargs) :
self.tasks.put((func, arg, kargs))
def map(self, func, args_list) :
for arg in args_list :
self.add_task(func, arg)
def wait_completion(self) :
self.tasks.join()
def clean_file_name(file_name) :
for c in r'[]/\;,><&*:%=+@#^()|?^':
file_name = file_name.replace(c,'')
return file_name
def download_episode(episode) :
global titles, gui
Color.printer("INFO","Downloading "+episode.episode+"...", gui)
if system() == "Windows" :
episode.title = clean_file_name(episode.title)
if titles :
file_name = directory + episode.episode + " - " + episode.title + ".mp4"
else :
file_name = directory+episode.episode+".mp4"
with requests.get(episode.download_url, stream=True, verify=False) as r:
with open(file_name, 'wb') as f:
shutil.copyfileobj(r.raw, f, length=16*1024*1024)
Color.printer("INFO",episode.episode + " finished downloading...", gui)
def download() :
global directory, threads, gui
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
Color.printer("INFO","Downloading started...", gui)
# for episode in Anime_Scraper.episodes :
# print("Downloading", episode.episode)
# urllib.request.urlretrieve(episode.download_url, directory+episode.episode+".mp4")
pool = ThreadPool(threads)
pool.map(download_episode, Anime_Scraper.episodes)
pool.wait_completion()
Color.printer("INFO", "Downloading finished!", gui)
def print_banner() :
banner = text2art("Anime Downloader")
Color.printer("BANNER", banner)
def main() :
global directory, args, threads, titles, token
print_banner()
parser = argparse.ArgumentParser(description="Anime Downloader Command Line Tool")
argparse.ArgumentParser(description="Help option parcer for Anime Downloader Command Line Tool", add_help=False, formatter_class=argparse.HelpFormatter)
parser.add_argument("-u", "--url", required=True, help="9Anime.to URL for the anime to be downloaded", dest="url")
parser.add_argument("-n", "--names", required=True, help="https://www.animefillerlist.com/ URL to retrieve episode titles", dest="title_url")
parser.add_argument("-d", "--directory", required=False, help="Download destination. Will use the current directory if not provided", default="" , dest="dir")
parser.add_argument("-s", "--start", required=False, help="Starting episode",default=1, type=int , dest="start")
parser.add_argument("-e", "--end", required=False, help="End episode", default=9999, type=int ,dest="end")
parser.add_argument("-c", "--code", required=False, help="Recaptcha answer token code. Insert this if you don't have 2captcha captcha bypass api_key", default=None, dest="token")
parser.add_argument("-t", "--threads", required=False, help="Number of parrallel downloads. Will download sequencially if not provided", default=1, type=int ,dest="threads")
parser.add_argument("-f", "--filler", required=False, help="Whether fillers needed", default=True, type=bool ,dest="isFiller")
args = parser.parse_args()
Anime_Scraper.download_9anime_url = args.url
Anime_Scraper.title_url = args.title_url
Anime_Scraper.isFiller = args.isFiller
# Anime_Scraper.ts_no = args.ts_no
token = args.token
directory = args.dir
threads = args.threads
if args.title_url :
titles = True
if directory != "" :
directory = directory.replace("\\", "/")
if not directory.endswith("/") :
directory+="/"
Anime_Scraper.main(args.start, args.end, token)
download()
if __name__ == "__main__":
#suppress warnings
warnings.filterwarnings("ignore")
#activate color codes
if sys.platform.lower() == "win32" :
os.system("color")
main()
|
[
"threading.Thread.__init__",
"argparse.ArgumentParser",
"warnings.filterwarnings",
"os.system",
"sys.platform.lower",
"Color.printer",
"art.text2art",
"Anime_Scraper.main",
"requests.get",
"platform.system",
"shutil.copyfileobj",
"queue.Queue"
] |
[((1470, 1538), 'Color.printer', 'Color.printer', (['"""INFO"""', "('Downloading ' + episode.episode + '...')", 'gui'], {}), "('INFO', 'Downloading ' + episode.episode + '...', gui)\n", (1483, 1538), False, 'import Color\n'), ((1969, 2041), 'Color.printer', 'Color.printer', (['"""INFO"""', "(episode.episode + ' finished downloading...')", 'gui'], {}), "('INFO', episode.episode + ' finished downloading...', gui)\n", (1982, 2041), False, 'import Color\n'), ((2461, 2513), 'Color.printer', 'Color.printer', (['"""INFO"""', '"""Downloading started..."""', 'gui'], {}), "('INFO', 'Downloading started...', gui)\n", (2474, 2513), False, 'import Color\n'), ((2825, 2876), 'Color.printer', 'Color.printer', (['"""INFO"""', '"""Downloading finished!"""', 'gui'], {}), "('INFO', 'Downloading finished!', gui)\n", (2838, 2876), False, 'import Color\n'), ((2913, 2944), 'art.text2art', 'text2art', (['"""Anime Downloader"""'], {}), "('Anime Downloader')\n", (2921, 2944), False, 'from art import text2art\n'), ((2949, 2980), 'Color.printer', 'Color.printer', (['"""BANNER"""', 'banner'], {}), "('BANNER', banner)\n", (2962, 2980), False, 'import Color\n'), ((3081, 3154), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Anime Downloader Command Line Tool"""'}), "(description='Anime Downloader Command Line Tool')\n", (3104, 3154), False, 'import argparse\n'), ((3159, 3321), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Help option parcer for Anime Downloader Command Line Tool"""', 'add_help': '(False)', 'formatter_class': 'argparse.HelpFormatter'}), "(description=\n 'Help option parcer for Anime Downloader Command Line Tool', add_help=\n False, formatter_class=argparse.HelpFormatter)\n", (3182, 3321), False, 'import argparse\n'), ((4944, 4991), 'Anime_Scraper.main', 'Anime_Scraper.main', (['args.start', 'args.end', 'token'], {}), '(args.start, args.end, token)\n', (4962, 4991), False, 'import Anime_Scraper\n'), ((5063, 5096), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5086, 5096), False, 'import warnings\n'), ((392, 413), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (407, 413), False, 'from threading import Thread\n'), ((909, 927), 'queue.Queue', 'Queue', (['num_threads'], {}), '(num_threads)\n', (914, 927), False, 'from queue import Queue\n'), ((1542, 1550), 'platform.system', 'system', ([], {}), '()\n', (1548, 1550), False, 'from platform import system\n'), ((1793, 1854), 'requests.get', 'requests.get', (['episode.download_url'], {'stream': '(True)', 'verify': '(False)'}), '(episode.download_url, stream=True, verify=False)\n', (1805, 1854), False, 'import requests\n'), ((5135, 5155), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (5153, 5155), False, 'import sys\n'), ((5177, 5195), 'os.system', 'os.system', (['"""color"""'], {}), "('color')\n", (5186, 5195), False, 'import os\n'), ((1914, 1967), 'shutil.copyfileobj', 'shutil.copyfileobj', (['r.raw', 'f'], {'length': '(16 * 1024 * 1024)'}), '(r.raw, f, length=16 * 1024 * 1024)\n', (1932, 1967), False, 'import shutil\n'), ((737, 768), 'Color.printer', 'Color.printer', (['"""ERROR"""', 'ex', 'gui'], {}), "('ERROR', ex, gui)\n", (750, 768), False, 'import Color\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0098_auto_20170522_0940'),
]
operations = [
migrations.AddField(
model_name='checksettings',
name='hrisk_diff_sender_count',
field=models.IntegerField(default=3, help_text='\u4e00\u5929\u5185 \u540c\u4e00\u53d1\u4ef6\u4eba\u540d\u79f0\u4e0d\u540c\u503c\u8d85\u8fc7N\u6b21\uff0c \u5219\u5728\u4ee5\u540e\u7684M\u65f6\u95f4\u5185\u62e6\u622a\u5176\u6240\u6709\u90ae\u4ef6\uff0c\u5e76\u653e\u5165\u201c\u9ad8\u5371\u53d1\u4ef6\u4eba\u201d\u8fdb\u884c\u4eba\u5de5\u5ba1\u6838', verbose_name='\u540d\u79f0\u4e0d\u540c\u7684\u9ad8\u5371\u53d1\u4ef6\u4eba(\u4e0d\u540c\u6b21\u6570)'),
),
migrations.AddField(
model_name='checksettings',
name='hrisk_diff_sender_time',
field=models.IntegerField(default=600, help_text='\u5355\u4f4d:\u5206\u949f, \u4e00\u5929\u5185 \u540c\u4e00\u53d1\u4ef6\u4eba\u540d\u79f0\u4e0d\u540c\u503c\u8d85\u8fc7N\u6b21\uff0c \u5219\u5728\u4ee5\u540e\u7684M\u65f6\u95f4\u5185\u62e6\u622a\u5176\u6240\u6709\u90ae\u4ef6\uff0c\u5e76\u653e\u5165\u201c\u9ad8\u5371\u53d1\u4ef6\u4eba\u201d\u8fdb\u884c\u4eba\u5de5\u5ba1\u6838', verbose_name='\u540d\u79f0\u4e0d\u540c\u7684\u9ad8\u5371\u53d1\u4ef6\u4eba(\u62e6\u622a\u65f6\u95f4)'),
),
]
|
[
"django.db.models.IntegerField"
] |
[((371, 511), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(3)', 'help_text': '"""一天内 同一发件人名称不同值超过N次, 则在以后的M时间内拦截其所有邮件,并放入“高危发件人”进行人工审核"""', 'verbose_name': '"""名称不同的高危发件人(不同次数)"""'}), "(default=3, help_text=\n '一天内 同一发件人名称不同值超过N次, 则在以后的M时间内拦截其所有邮件,并放入“高危发件人”进行人工审核', verbose_name=\n '名称不同的高危发件人(不同次数)')\n", (390, 511), False, 'from django.db import models, migrations\n'), ((959, 1107), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(600)', 'help_text': '"""单位:分钟, 一天内 同一发件人名称不同值超过N次, 则在以后的M时间内拦截其所有邮件,并放入“高危发件人”进行人工审核"""', 'verbose_name': '"""名称不同的高危发件人(拦截时间)"""'}), "(default=600, help_text=\n '单位:分钟, 一天内 同一发件人名称不同值超过N次, 则在以后的M时间内拦截其所有邮件,并放入“高危发件人”进行人工审核',\n verbose_name='名称不同的高危发件人(拦截时间)')\n", (978, 1107), False, 'from django.db import models, migrations\n')]
|
"""
Generate Accelerated Thrift bindings
"""
import os
import argparse
import re
import shutil
import subprocess
import sys
xpr_hints = re.compile(".*completion_hints.*")
def parse_args(args=None):
parser = argparse.ArgumentParser(description='Run some benchmarks')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin, help="mapd.thrift file")
parser.add_argument('outfile', nargs='?', default="mapd.thrift",
help="Patched mapd.thrift file")
return parser.parse_args(args)
def thrift_gen(spec):
subprocess.check_output(['thrift', '-gen', 'py', '-r', spec])
def main(args=None):
args = parse_args(args)
thrift = args.infile.readlines()
new = [x for x in thrift if not xpr_hints.match(x)]
with open(args.outfile, 'wt') as f:
f.write(''.join(new))
try:
thrift_gen(args.outfile)
shutil.rmtree("mapd", ignore_errors=True)
shutil.copytree(os.path.join("gen-py", "mapd"), "mapd")
finally:
os.remove(args.outfile)
shutil.rmtree("gen-py")
if __name__ == '__main__':
sys.exit(main(None))
|
[
"os.remove",
"argparse.ArgumentParser",
"subprocess.check_output",
"shutil.rmtree",
"os.path.join",
"argparse.FileType",
"re.compile"
] |
[((137, 171), 're.compile', 're.compile', (['""".*completion_hints.*"""'], {}), "('.*completion_hints.*')\n", (147, 171), False, 'import re\n'), ((214, 272), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run some benchmarks"""'}), "(description='Run some benchmarks')\n", (237, 272), False, 'import argparse\n'), ((604, 665), 'subprocess.check_output', 'subprocess.check_output', (["['thrift', '-gen', 'py', '-r', spec]"], {}), "(['thrift', '-gen', 'py', '-r', spec])\n", (627, 665), False, 'import subprocess\n'), ((931, 972), 'shutil.rmtree', 'shutil.rmtree', (['"""mapd"""'], {'ignore_errors': '(True)'}), "('mapd', ignore_errors=True)\n", (944, 972), False, 'import shutil\n'), ((1058, 1081), 'os.remove', 'os.remove', (['args.outfile'], {}), '(args.outfile)\n', (1067, 1081), False, 'import os\n'), ((1090, 1113), 'shutil.rmtree', 'shutil.rmtree', (['"""gen-py"""'], {}), "('gen-py')\n", (1103, 1113), False, 'import shutil\n'), ((323, 345), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (340, 345), False, 'import argparse\n'), ((997, 1027), 'os.path.join', 'os.path.join', (['"""gen-py"""', '"""mapd"""'], {}), "('gen-py', 'mapd')\n", (1009, 1027), False, 'import os\n')]
|
from huffman import HuffZipFile
from os import listdir
from os.path import isfile, join, splitext
import hashlib
import time
def get_md5(path):
md5 = hashlib.md5()
with open(path, "rb") as f:
while True:
data = f.read(4096)
if not data:
break
md5.update(data)
return md5.hexdigest()
if __name__ == "__main__":
for f in listdir("testcase/"):
path = join("testcase/", f)
if isfile(path) and splitext(path)[1] != ".bak" and splitext(path)[1] != ".huff":
print("Start {}".format(f))
start_time = time.time()
from_file = open(path, "rb")
to_file = open(join("testcase/", splitext(f)[0] + ".huff"), "wb")
zip_file = HuffZipFile(decompress=False, file_stream=from_file)
zip_file.compress(to_file)
del zip_file
# quit()
print("File {} has finished compressing. Time {}. Decompressing...".format(f, time.time() - start_time))
start_time = time.time()
from_file = open(join("testcase/", splitext(f)[0] + ".huff"), "rb")
to_file = open(path + ".bak", "wb")
zip_file = HuffZipFile(decompress=True, file_stream=from_file)
zip_file.decompress(to_file)
del zip_file
print("File {} finished decompressing! Time {}.".format(f, time.time() - start_time))
md5_1 = get_md5(path)
md5_2 = get_md5(path + ".bak")
print("Result of {}".format(f))
if md5_1 != md5_2:
print("Wrong!")
else:
print("Right!")
print("")
|
[
"hashlib.md5",
"time.time",
"os.path.isfile",
"os.path.splitext",
"huffman.HuffZipFile",
"os.path.join",
"os.listdir"
] |
[((156, 169), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (167, 169), False, 'import hashlib\n'), ((401, 421), 'os.listdir', 'listdir', (['"""testcase/"""'], {}), "('testcase/')\n", (408, 421), False, 'from os import listdir\n'), ((438, 458), 'os.path.join', 'join', (['"""testcase/"""', 'f'], {}), "('testcase/', f)\n", (442, 458), False, 'from os.path import isfile, join, splitext\n'), ((470, 482), 'os.path.isfile', 'isfile', (['path'], {}), '(path)\n', (476, 482), False, 'from os.path import isfile, join, splitext\n'), ((614, 625), 'time.time', 'time.time', ([], {}), '()\n', (623, 625), False, 'import time\n'), ((768, 820), 'huffman.HuffZipFile', 'HuffZipFile', ([], {'decompress': '(False)', 'file_stream': 'from_file'}), '(decompress=False, file_stream=from_file)\n', (779, 820), False, 'from huffman import HuffZipFile\n'), ((1048, 1059), 'time.time', 'time.time', ([], {}), '()\n', (1057, 1059), False, 'import time\n'), ((1212, 1263), 'huffman.HuffZipFile', 'HuffZipFile', ([], {'decompress': '(True)', 'file_stream': 'from_file'}), '(decompress=True, file_stream=from_file)\n', (1223, 1263), False, 'from huffman import HuffZipFile\n'), ((487, 501), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (495, 501), False, 'from os.path import isfile, join, splitext\n'), ((519, 533), 'os.path.splitext', 'splitext', (['path'], {}), '(path)\n', (527, 533), False, 'from os.path import isfile, join, splitext\n'), ((996, 1007), 'time.time', 'time.time', ([], {}), '()\n', (1005, 1007), False, 'import time\n'), ((1402, 1413), 'time.time', 'time.time', ([], {}), '()\n', (1411, 1413), False, 'import time\n'), ((712, 723), 'os.path.splitext', 'splitext', (['f'], {}), '(f)\n', (720, 723), False, 'from os.path import isfile, join, splitext\n'), ((1108, 1119), 'os.path.splitext', 'splitext', (['f'], {}), '(f)\n', (1116, 1119), False, 'from os.path import isfile, join, splitext\n')]
|
#! python3
import pyautogui, sys, time
#print('Press Ctrl-C to quit.')
while True:
x, y = pyautogui.position()
positionStr = ',' + str(x).rjust(4) + ',' + str(y).rjust(4)
print( time.time(), positionStr, '\n', flush=True)
time.sleep(0.05)
|
[
"pyautogui.position",
"time.time",
"time.sleep"
] |
[((94, 114), 'pyautogui.position', 'pyautogui.position', ([], {}), '()\n', (112, 114), False, 'import pyautogui, sys, time\n'), ((238, 254), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (248, 254), False, 'import pyautogui, sys, time\n'), ((190, 201), 'time.time', 'time.time', ([], {}), '()\n', (199, 201), False, 'import pyautogui, sys, time\n')]
|
"""
Implementation of the `multicodec spec <https://github.com/multiformats/multicodec>`_.
Suggested usage:
>>> from multiformats import multicodec
"""
import importlib.resources as importlib_resources
from io import BufferedIOBase
import json
import re
import sys
from typing import AbstractSet, Any, cast, Dict, Iterable, Iterator, Mapping, Optional, overload, Set, Sequence, Tuple, Type, TypeVar, Union
from typing_extensions import Literal
from typing_validation import validate
from multiformats import varint
from multiformats.varint import BytesLike
# from . import err
from .err import MulticodecKeyError, MulticodecValueError
def _hexcode(code: int) -> str:
hexcode = hex(code)
if len(hexcode) % 2 != 0:
hexcode = "0x0"+hexcode[2:]
return hexcode
class Multicodec:
"""
Container class for a multicodec.
Example usage:
>>> Multicodec(**{
... 'name': 'cidv1', 'tag': 'cid', 'code': '0x01',
... 'status': 'permanent', 'description': 'CIDv1'})
Multicodec(name='cidv1', tag='cid', code=1,
status='permanent', description='CIDv1')
:param name: the multicodec name
:type name: :obj:`str`
:param tag: the multicodec tag
:type tag: :obj:`str`
:param code: the multicodec code, as integer or ``0xYZ`` hex-string
:type code: :obj:`int` or :obj:`str`
:param status: the multicodec status
:type status: ``'draft'`` or ``'permanent'``, *optional*
:param description: the multicodec description
:type description: :obj:`str`, *optional*
"""
_name: str
_tag: str
_code: int
_status: Literal["draft", "permanent"]
_description: str
__slots__ = ("__weakref__", "_name", "_tag", "_code", "_status", "_description")
def __init__(self, *,
name: str,
tag: str,
code: Union[int, str],
status: str = "draft",
description: str = ""
):
for arg in (name, tag, status, description):
validate(arg, str)
validate(code, Union[int, str])
name = Multicodec._validate_name(name)
code = Multicodec.validate_code(code)
status = Multicodec._validate_status(status)
self._name = name
self._tag = tag
self._code = code
self._status = status
self._description = description
@staticmethod
def _validate_name(name: str) -> str:
if not re.match(r"^[a-z][a-z0-9_-]+$", name):
raise MulticodecValueError(f"Invalid multicodec name {repr(name)}")
return name
@staticmethod
def validate_code(code: Union[int, str]) -> int:
"""
Validates a multicodec code and transforms it to unsigned integer format (if in hex format).
:param code: the multicodec code, as integer or `0xYZ` hex-string
:type code: :obj:`int` or :obj:`str`
:raises ValueError: if the code is invalid
"""
if isinstance(code, str):
if code.startswith("0x"):
code = code[2:]
code = int(code, base=16)
if code < 0:
raise MulticodecValueError(f"Invalid multicodec code {repr(code)}.")
return code
@staticmethod
def _validate_status(status: str) -> Literal["draft", "permanent"]:
if status not in ("draft", "permanent"):
raise MulticodecValueError(f"Invalid multicodec status {repr(status)}.")
return cast(Literal["draft", "permanent"], status)
@property
def name(self) -> str:
"""
Multicodec name. Must satisfy the following:
.. code-block:: python
re.match(r"^[a-z][a-z0-9_-]+$", name)
"""
return self._name
@property
def tag(self) -> str:
""" Multicodec tag. """
return self._tag
@property
def code(self) -> int:
""" Multicodec code. Must be a non-negative integer. """
return self._code
@property
def hexcode(self) -> str:
"""
Multicodec code as a hex string (with hex digits zero-padded to even length):
Example usage:
>>> m = multicodec.get(1)
>>> m.code
1
>>> m.hexcode
'0x01'
"""
return _hexcode(self._code)
@property
def status(self) -> Literal["draft", "permanent"]:
""" Multicodec status. """
return self._status
@property
def description(self) -> str:
""" Multicodec description. """
return self._description
@property
def is_private_use(self) -> bool:
"""
Whether this multicodec code is reserved for private use,
i.e. whether it is in ``range(0x300000, 0x400000)``.
"""
return self.code in range(0x300000, 0x400000)
def wrap(self, raw_data: BytesLike) -> bytes:
"""
Wraps raw binary data into multicodec data:
.. code-block:: console
<raw data> --> <code><raw data>
Example usage:
>>> ip4 = multicodec.get("ip4")
>>> ip4
Multicodec(name='ip4', tag='multiaddr', code='0x04', status='permanent', description='')
>>> raw_data = bytes([192, 168, 0, 254])
>>> multicodec_data = ip4.wrap(raw_data)
>>> raw_data.hex()
'c0a800fe'
>>> multicodec_data.hex()
'04c0a800fe'
>>> varint.encode(0x04).hex()
'04' # 0x04 ^^^^ is the multicodec code for 'ip4'
:param raw_data: the raw data to be wrapped
:type raw_data: :obj:`~multiformats.varint.BytesLike`
:raise ValueError: see :func:`~multiformats.varint.encode`
"""
return varint.encode(self.code)+raw_data
def unwrap(self, multicodec_data: BytesLike) -> bytes:
"""
Unwraps multicodec binary data to raw data:
.. code-block::
<code><raw data> --> <raw data>
Additionally checks that the code listed by the data
matches the code of this multicodec.
Example usage:
>>> multicodec_data = bytes.fromhex("c0a800fe")
>>> raw_data = ip4.unwrap(multicodec_data)
>>> multicodec_data.hex()
'04c0a800fe'
>>> raw_data.hex()
'c0a800fe'
>>> varint.encode(0x04).hex()
'04' # 0x04 ^^^^ is the multicodec code for 'ip4'
:param multicodec_data: the multicodec data to be unwrapped
:type multicodec_data: :obj:`~multiformats.varint.BytesLike`
:raise ValueError: if the unwrapped multicodec code does not match this multicodec's code
:raise ValueError: see :func:`multiformats.multicodec.unwrap_raw`
:raise KeyError: see :func:`multiformats.multicodec.unwrap_raw`
"""
code, _, raw_data = unwrap_raw(multicodec_data)
# code, _, raw_data = varint.decode_raw(multicodec_data)
if code != self.code:
hexcode = _hexcode(code)
raise MulticodecValueError(f"Found code {hexcode} when unwrapping data, expected code {self.hexcode}.")
return bytes(raw_data)
def to_json(self) -> Mapping[str, str]:
"""
Returns a JSON dictionary representation of this multicodec object.
Example usage:
>>> m = multicodec.get(1)
>>> m.to_json()
{'name': 'cidv1', 'tag': 'cid', 'code': '0x01',
'status': 'permanent', 'description': 'CIDv1'}
"""
return {
"name": self.name,
"tag": self.tag,
"code": self.hexcode,
"status": self.status,
"description": self.description
}
def __str__(self) -> str:
if exists(self.name) and get(self.name) == self:
return f"multicodec({repr(self.name)}, tag={repr(self.tag)})"
return repr(self)
def __repr__(self) -> str:
return f"Multicodec({', '.join(f'{k}={repr(v)}' for k, v in self.to_json().items())})"
@property
def _as_tuple(self) -> Tuple[Type["Multicodec"], str, str, int, Literal["draft", "permanent"]]:
return (Multicodec, self.name, self.tag, self.code, self.status)
def __hash__(self) -> int:
return hash(self._as_tuple)
def __eq__(self, other: Any) -> bool:
if self is other:
return True
if not isinstance(other, Multicodec):
return NotImplemented
return self._as_tuple == other._as_tuple
def get(name: Optional[str] = None, *, code: Optional[int] = None) -> Multicodec:
"""
Gets the multicodec with given name or code.
Example usage:
>>> multicodec.get("identity")
Multicodec(name='identity', tag='multihash', code=0,
status='permanent', description='raw binary')
>>> multicodec.get(code=0x01)
Multicodec(name='cidv1', tag='ipld', code=1,
status='permanent', description='CIDv1')
:param name: the multicodec name
:type name: :obj:`str` or :obj:`None`, *optional*
:param code: the multicodec code
:type code: :obj:`int` or :obj:`None`, *optional*
:raises KeyError: if no such multicodec exists
:raises ValueError: unless exactly one of ``name`` and ``code`` is specified
"""
validate(name, Optional[str])
validate(code, Optional[int])
if (name is None) == (code is None):
raise MulticodecValueError("Must specify exactly one between 'name' and 'code'.")
if name is not None:
if name not in _name_table:
raise MulticodecKeyError(f"No multicodec named {repr(name)}.")
return _name_table[name]
if code not in _code_table:
raise MulticodecKeyError(f"No multicodec with code {repr(code)}.")
return _code_table[code]
def multicodec(name: str, *, tag: Optional[str] = None) -> Multicodec:
"""
An alias for :func:`get`, for use with multicodec name only.
If a tag is passed, ensures that the multicodec tag matches the one given.
Example usage:
>>> from multiformats.multicodec import multicodec
>>> multicodec("identity")
Multicodec(name='identity', tag='multihash', code=0,
status='permanent', description='raw binary')
:param name: the multicodec name
:type name: :obj:`str`
:param tag: the optional multicodec tag
:type tag: :obj:`str` or :obj:`None`, *optional*
:raises KeyError: see :func:`get`
"""
codec = get(name)
if tag is not None and codec.tag != tag:
raise MulticodecKeyError(f"Multicodec {repr(name)} exists, but its tag is not {repr(tag)}.")
return codec
def exists(name: Union[None, str, Multicodec] = None, *, code: Optional[int] = None) -> bool:
"""
Checks whether there is a multicodec with the given name or code.
Example usage:
>>> multicodec.exists("identity")
True
>>> multicodec.exists(code=0x01)
True
:param name: the multicodec name
:type name: :obj:`str` or :obj:`None`, *optional*
:param code: the multicodec code
:type code: :obj:`int` or :obj:`None`, *optional*
:raises ValueError: unless exactly one of ``name`` and ``code`` is specified
"""
validate(name, Optional[str])
validate(code, Optional[int])
if (name is None) == (code is None):
raise MulticodecValueError("Must specify exactly one between 'name' and 'code'.")
if name is not None:
return name in _name_table
return code in _code_table
def wrap(codec: Union[str, int, Multicodec], raw_data: BytesLike) -> bytes:
"""
Wraps raw binary data into multicodec data:
.. code-block::
<raw data> --> <code><raw data>
Example usage:
>>> raw_data = bytes([192, 168, 0, 254])
>>> multicodec_data = multicodec.wrap("ip4", raw_data)
>>> raw_data.hex()
'c0a800fe'
>>> multicodec_data.hex()
'04c0a800fe'
>>> varint.encode(0x04).hex()
'04' # 0x04 ^^^^ is the multicodec code for 'ip4'
:param codec: the multicodec that the raw data refers to
:type codec: :obj:`str`, :obj:`int` or :class:`Multicodec`
:param raw_data: the raw binary data
:type raw_data: :obj:`~multiformats.varint.BytesLike`
:raises KeyError: see :func:`get`
"""
if isinstance(codec, str):
codec = get(codec)
elif isinstance(codec, int):
codec = get(code=codec)
else:
validate(codec, Multicodec)
return codec.wrap(raw_data)
def unwrap(multicodec_data: BytesLike) -> Tuple[Multicodec, bytes]:
"""
Unwraps multicodec binary data to multicodec and raw data:
Example usage:
>>> multicodec_data = bytes.fromhex("04c0a800fe")
>>> codec, raw_data = multicodec.unwrap(multicodec_data)
>>> multicodec_data.hex()
'04c0a800fe'
>>> raw_data.hex()
'c0a800fe'
>>> codec
Multicodec(name='ip4', tag='multiaddr', code='0x04', status='permanent', description='')
:param multicodec_data: the binary data prefixed with multicodec code
:type multicodec_data: :obj:`~multiformats.varint.BytesLike`
:raises KeyError: if the code does not correspond to a know multicodec
"""
code, _, raw_data = unwrap_raw(multicodec_data)
return get(code=code), bytes(raw_data)
_BufferedIOT = TypeVar("_BufferedIOT", bound=BufferedIOBase)
@overload
def unwrap_raw(multicodec_data: BytesLike) -> Tuple[int, int, memoryview]:
...
@overload
def unwrap_raw(multicodec_data: _BufferedIOT) -> Tuple[int, int, _BufferedIOT]:
...
def unwrap_raw(multicodec_data: Union[BytesLike, BufferedIOBase]) -> Tuple[int, int, Union[memoryview, BufferedIOBase]]:
"""
Similar to :func:`unwrap`, but returns a triple of multicodec code, number of bytes read and remaining bytes.
Example usage:
>>> multicodec_data = bytes.fromhex("04c0a800fe")
>>> code, num_bytes_read, remaining_bytes = multicodec.unwrap_raw(multicodec_data)
>>> code
4
>>> num_bytes_read
1
>>> remaining_bytes
<memory at 0x000001BE46B17640>
>>> multicodec_data.hex()
'04c0a800fe'
>>> bytes(remaining_bytes).hex()
'c0a800fe'
:param multicodec_data: the binary data prefixed with multicodec code
:type multicodec_data: :obj:`~multiformats.varint.BytesLike`
:raises KeyError: if the code does not correspond to a know multicodec
"""
code, n, raw_data = varint.decode_raw(multicodec_data)
if not exists(code=code):
raise MulticodecKeyError(f"No multicodec is known with unwrapped code {_hexcode(code)}.")
return code, n, raw_data
def validate_multicodec(codec: Multicodec) -> None:
"""
Validates an instance of :class:`Multicodec`.
If the multicodec is registered (i.e. valid), no error is raised.
:param codec: the instance to be validated
:type codec: :class:`Multicodec`
:raises KeyError: if no multicodec with the given name is registered
:raises ValueError: if a multicodec with the given name is registered, but is different from the one given
"""
validate(codec, Multicodec)
mc = get(codec.name)
if mc != codec:
raise MulticodecValueError(f"Multicodec named {codec.name} exists, but is not the one given.")
def register(codec: Multicodec, *, overwrite: bool = False) -> None:
"""
Registers a given multicodec.
Example usage:
>>> m = Multicodec("my-multicodec", "my-tag", 0x300001, "draft", "...")
>>> multicodec.register(m)
>>> multicodec.exists(code=0x300001)
True
>>> multicodec.get(code=0x300001).name
'my-multicodec'
>>> multicodec.get(code=0x300001).is_private_use
True
:param codec: the multicodec to register
:type codec: :class:`Multicodec`
:param overwrite: whether to overwrite a multicodec with existing code (optional, default :obj:`False`)
:type overwrite: :obj:`bool`, *optional*
:raises ValueError: if ``overwrite`` is :obj:`False` and a multicodec with the same name or code already exists
:raises ValueError: if ``overwrite`` is :obj:`True` and a multicodec with the same name but different code already exists
"""
validate(codec, Multicodec)
validate(overwrite, bool)
if not overwrite and codec.code in _code_table:
raise MulticodecValueError(f"Multicodec with code {repr(codec.code)} already exists: {_code_table[codec.code]}")
if codec.name in _name_table and _name_table[codec.name].code != codec.code:
raise MulticodecValueError(f"Multicodec with name {repr(codec.name)} already exists: {_name_table[codec.name]}")
_code_table[codec.code] = codec
_name_table[codec.name] = codec
def unregister(name: Optional[str] = None, *, code: Optional[int] = None) -> None:
"""
Unregisters the multicodec with given name or code.
Example usage:
>>> multicodec.unregister(code=0x01) # cidv1
>>> multicodec.unregister(code=0x01)
False
:param name: the multicodec name
:type name: :obj:`str` or :obj:`None`, *optional*
:param code: the multicodec code
:type code: :obj:`int` or :obj:`None`, *optional*
:raises KeyError: if no such multicodec exists
:raises ValueError: unless exactly one of ``name`` and ``code`` is specified
"""
m = get(name, code=code)
del _code_table[m.code]
del _name_table[m.name]
def table(*,
tag: Union[None, str, AbstractSet[str], Sequence[str]] = None,
status: Union[None, str, AbstractSet[str], Sequence[str]] = None) -> Iterator[Multicodec]:
"""
Iterates through the registered multicodecs, in order of ascending code.
Example usage:
>>> len(list(multicodec.table())) # multicodec.table() returns an iterator
482
>>> selected = multicodec.table(tag=["cid", "cid", "multiaddr"], status="permanent")
>>> [m.code for m in selected]
[1, 4, 6, 41, 53, 54, 55, 56, 81, 85, 112, 113, 114, 120,
144, 145, 146, 147, 148, 149, 150, 151, 152, 176, 177,
178, 192, 193, 290, 297, 400, 421, 460, 477, 478, 479, 512]
:param tag: one or more tags to be selected (if :obj:`None`, all tags are included)
:type tag: :obj:`None`, :obj:`str`, set or sequence of :obj:`str`, *optional*
:param status: one or more statuses to be selected (if :obj:`None`, all statuses are included)
:type status: :obj:`None`, :obj:`str`, set or sequence of :obj:`str`, *optional*
"""
validate(tag, Union[None, str, AbstractSet[str], Sequence[str]])
validate(status, Union[None, str, AbstractSet[str], Sequence[str]])
tags: Union[None, AbstractSet[str], Sequence[str]]
if tag is None:
tags = None
elif isinstance(tag, str):
tags = [tag]
else:
tags = tag
statuses: Union[None, AbstractSet[str], Sequence[str]]
if status is None:
statuses = None
elif isinstance(status, str):
statuses = [status]
else:
statuses = status
for code in sorted(_code_table.keys()):
m = _code_table[code]
if tags is not None and m.tag not in tags:
continue
if statuses is not None and m.status not in statuses:
continue
yield m
def build_multicodec_tables(codecs: Iterable[Multicodec], *,
allow_private_use: bool = False) -> Tuple[Dict[int, Multicodec], Dict[str, Multicodec]]:
"""
Creates code->multicodec and name->multicodec mappings from a finite iterable of multicodecs,
returning the mappings.
Example usage:
>>> code_table, name_table = build_multicodec_tables(codecs)
:param codecs: multicodecs to be registered
:type codecs: iterable of :class:`Multicodec`
:param allow_private_use: whether to allow multicodec entries with private use codes in ``range(0x300000, 0x400000)`` (default :obj:`False`)
:type allow_private_use: :obj:`bool`, *optional*
:raises ValueError: if ``allow_private_use`` and a multicodec with private use code is encountered
:raises ValueError: if the same multicodec code is encountered multiple times, unless exactly one of the multicodecs
has permanent status (in which case that codec is the one inserted in the table)
:raises ValueError: if the same name is encountered multiple times
"""
# validate(codecs, Iterable[Multicodec]) # TODO: not yet properly supported by typing-validation
validate(allow_private_use, bool)
code_table: Dict[int, Multicodec] = {}
name_table: Dict[str, Multicodec] = {}
overwritten_draft_codes: Set[int] = set()
for m in codecs:
if not allow_private_use and m.is_private_use:
raise MulticodecValueError(f"Private use multicodec not allowed: {m}")
if m.code in code_table:
if code_table[m.code].status == "permanent":
if m.status == "draft":
# this draft code has been superseded by a permanent one, skip it
continue
raise MulticodecValueError(f"Multicodec code {m.hexcode} appears multiple times in table.")
if m.status != "permanent":
# overwriting draft code with another draft code: dodgy, need to check at the end
overwritten_draft_codes.add(m.code)
code_table[m.code] = m
if m.name in name_table:
raise MulticodecValueError(f"Multicodec name {m.name} appears multiple times in table.")
name_table[m.name] = m
for code in overwritten_draft_codes:
m = code_table[code]
if m.status != "permanent":
raise MulticodecValueError(f"Code {m.code} appears multiple times in table, "
"but none of the associated multicodecs is permanent.")
return code_table, name_table
# Create the global code->multicodec and name->multicodec mappings.
_code_table: Dict[int, Multicodec]
_name_table: Dict[str, Multicodec]
with importlib_resources.open_text("multiformats.multicodec", "multicodec-table.json") as _table_f:
_table_json = json.load(_table_f)
_code_table, _name_table = build_multicodec_tables(Multicodec(**row) for row in _table_json)
|
[
"importlib.resources.open_text",
"multiformats.varint.decode_raw",
"json.load",
"typing.cast",
"multiformats.varint.encode",
"re.match",
"typing_validation.validate",
"typing.TypeVar"
] |
[((13775, 13820), 'typing.TypeVar', 'TypeVar', (['"""_BufferedIOT"""'], {'bound': 'BufferedIOBase'}), "('_BufferedIOT', bound=BufferedIOBase)\n", (13782, 13820), False, 'from typing import AbstractSet, Any, cast, Dict, Iterable, Iterator, Mapping, Optional, overload, Set, Sequence, Tuple, Type, TypeVar, Union\n'), ((9588, 9617), 'typing_validation.validate', 'validate', (['name', 'Optional[str]'], {}), '(name, Optional[str])\n', (9596, 9617), False, 'from typing_validation import validate\n'), ((9622, 9651), 'typing_validation.validate', 'validate', (['code', 'Optional[int]'], {}), '(code, Optional[int])\n', (9630, 9651), False, 'from typing_validation import validate\n'), ((11589, 11618), 'typing_validation.validate', 'validate', (['name', 'Optional[str]'], {}), '(name, Optional[str])\n', (11597, 11618), False, 'from typing_validation import validate\n'), ((11623, 11652), 'typing_validation.validate', 'validate', (['code', 'Optional[int]'], {}), '(code, Optional[int])\n', (11631, 11652), False, 'from typing_validation import validate\n'), ((14944, 14978), 'multiformats.varint.decode_raw', 'varint.decode_raw', (['multicodec_data'], {}), '(multicodec_data)\n', (14961, 14978), False, 'from multiformats import varint\n'), ((15625, 15652), 'typing_validation.validate', 'validate', (['codec', 'Multicodec'], {}), '(codec, Multicodec)\n', (15633, 15652), False, 'from typing_validation import validate\n'), ((16771, 16798), 'typing_validation.validate', 'validate', (['codec', 'Multicodec'], {}), '(codec, Multicodec)\n', (16779, 16798), False, 'from typing_validation import validate\n'), ((16803, 16828), 'typing_validation.validate', 'validate', (['overwrite', 'bool'], {}), '(overwrite, bool)\n', (16811, 16828), False, 'from typing_validation import validate\n'), ((19113, 19177), 'typing_validation.validate', 'validate', (['tag', 'Union[None, str, AbstractSet[str], Sequence[str]]'], {}), '(tag, Union[None, str, AbstractSet[str], Sequence[str]])\n', (19121, 19177), False, 'from typing_validation import validate\n'), ((19182, 19249), 'typing_validation.validate', 'validate', (['status', 'Union[None, str, AbstractSet[str], Sequence[str]]'], {}), '(status, Union[None, str, AbstractSet[str], Sequence[str]])\n', (19190, 19249), False, 'from typing_validation import validate\n'), ((21115, 21148), 'typing_validation.validate', 'validate', (['allow_private_use', 'bool'], {}), '(allow_private_use, bool)\n', (21123, 21148), False, 'from typing_validation import validate\n'), ((22639, 22724), 'importlib.resources.open_text', 'importlib_resources.open_text', (['"""multiformats.multicodec"""', '"""multicodec-table.json"""'], {}), "('multiformats.multicodec',\n 'multicodec-table.json')\n", (22668, 22724), True, 'import importlib.resources as importlib_resources\n'), ((22752, 22771), 'json.load', 'json.load', (['_table_f'], {}), '(_table_f)\n', (22761, 22771), False, 'import json\n'), ((2147, 2178), 'typing_validation.validate', 'validate', (['code', 'Union[int, str]'], {}), '(code, Union[int, str])\n', (2155, 2178), False, 'from typing_validation import validate\n'), ((3576, 3619), 'typing.cast', 'cast', (["Literal['draft', 'permanent']", 'status'], {}), "(Literal['draft', 'permanent'], status)\n", (3580, 3619), False, 'from typing import AbstractSet, Any, cast, Dict, Iterable, Iterator, Mapping, Optional, overload, Set, Sequence, Tuple, Type, TypeVar, Union\n'), ((2120, 2138), 'typing_validation.validate', 'validate', (['arg', 'str'], {}), '(arg, str)\n', (2128, 2138), False, 'from typing_validation import validate\n'), ((2547, 2583), 're.match', 're.match', (['"""^[a-z][a-z0-9_-]+$"""', 'name'], {}), "('^[a-z][a-z0-9_-]+$', name)\n", (2555, 2583), False, 'import re\n'), ((5914, 5938), 'multiformats.varint.encode', 'varint.encode', (['self.code'], {}), '(self.code)\n', (5927, 5938), False, 'from multiformats import varint\n'), ((12857, 12884), 'typing_validation.validate', 'validate', (['codec', 'Multicodec'], {}), '(codec, Multicodec)\n', (12865, 12884), False, 'from typing_validation import validate\n')]
|
from airflow.hooks.base_hook import BaseHook
def get_conn(conn_id):
# get connection by name from BaseHook
conn = BaseHook.get_connection(conn_id)
return conn
|
[
"airflow.hooks.base_hook.BaseHook.get_connection"
] |
[((124, 156), 'airflow.hooks.base_hook.BaseHook.get_connection', 'BaseHook.get_connection', (['conn_id'], {}), '(conn_id)\n', (147, 156), False, 'from airflow.hooks.base_hook import BaseHook\n')]
|
from __future__ import absolute_import
import sys
import os.path
import logging
import random
import FWCore.ParameterSet.SequenceTypes as sqt
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.Modules as mod
import FWCore.ParameterSet.Types as typ
import FWCore.ParameterSet.Mixins as mix
from .Vispa.Plugins.ConfigEditor.ConfigDataAccessor import ConfigDataAccessor
from FWCore.GuiBrowsers.FileExportPlugin import FileExportPlugin
class JsonExport(FileExportPlugin):
option_types={}
plugin_name='JSON Export'
file_types=('html','json')
def __init__(self):
FileExportPlugin.__init__(self)
def produce(self,data):
#pset = lambda pdict: [[k,repr(v).split('(',1)[0],(repr(v).split('(',1)[1][:-1])] for k,v in pdict.items()]
def pset(pdict):
result = []
for k,v in pdict.items():
if v.pythonTypeName()=='cms.PSet' or v.pythonTypeName()=='cms.untracked.PSet':
result.append([k,v.pythonTypeName(),'pset',pset(v.parameters_())])
elif v.pythonTypeName()=='cms.VPSet' or v.pythonTypeName()=='cms.untracked.VPSet':
result.append([k,v.pythonTypeName(),'vpset',[pset(a.parameters_()) for a in v]])
elif v.pythonTypeName().lower().startswith('cms.v') or v.pythonTypeName().lower().startswith('cms.untracked.v'):
result.append([k,v.pythonTypeName(),'list',[repr(a) for a in v]])
else:
result.append([k,v.pythonTypeName(),'single',repr(v.pythonValue())])
return result
#allObjects = [d for d in data._allObjects if (data.type(d) in ("EDProducer","EDFilter","EDAnalyzer","OutputModule"))]
#data.readConnections(allObjects)
def moduledict(mod,prefix,links=False):
result={}
result['label']=data.label(mod)
result['class']=data.classname(mod)
result['file']=data.pypath(mod)
result['line']=data.lineNumber(mod)
result['package']=data.pypackage(mod)
result['pset']=pset(mod.parameters_())
result['type']=data.type(mod)
if links:
result['uses']=[data.uses(mod)]
result['usedby']=[data.usedBy(mod)]
result['id']='%s_%s'%(prefix,data.label(mod))
return result
all={}
for tlo in data.topLevelObjects():
children=data.children(tlo)
if children:
all[tlo._label]=children
process = {'name':data.process().name_(),'src':data._filename}
#now unavailable
#schedule = []
#if 'Schedule' in all:
# for s in all['Schedule']:
# schedule.append(data.label(s))
source={}
if 'source' in all:
s = all['source'][0]
source['class']=data.classname(s)
source['pset']=pset(s.parameters_())
essources=[]
if 'essources' in all:
for e in all['essources']:
essources.append(moduledict(e,'essource'))
esproducers=[]
if 'esproducers' in all:
for e in all['esproducers']:
essources.append(moduledict(e,'esproducer'))
esprefers=[]
if 'esprefers' in all:
for e in all['esprefers']:
essources.append(moduledict(e,'esprefers'))
services=[]
if 'services' in all:
for s in all['services']:
services.append({'class':data.classname(s),'pset':pset(s.parameters_())})
def jsonPathRecursive(p,prefix):
#print "At:",self.label(p),self.type(p)
children = data.children(p)
if children:
children = [jsonPathRecursive(c,prefix) for c in children]
return {'type':'Sequence','label':'Sequence %s'%(data.label(p)),'id':'seq_%s' % data.label(p),'children':children}
else:
return moduledict(p,prefix,True)
paths=[]
if 'paths' in all:
for p in all['paths']:
path=jsonPathRecursive(p,data.label(p))
if path:
if not isinstance(path, type([])):
if path['type']=='Sequence':
path = path['children']
else:
path = [path]
else:
path=[]
paths.append({'label':data.label(p),'path':path})
endpaths=[]
if 'endpaths' in all:
for p in all['endpaths']:
path=jsonPathRecursive(p,data.label(p))
if path:
if not isinstance(path, type([])):
if path['type']=='Sequence':
path = path['children']
else:
path = [path]
else:
path=[]
endpaths.append({'label':data.label(p),'path':path})
#json={'process':process,'schedule':schedule,'source':source,'essources':essources,'esproducers':esproducers,'esprefers':esprefers,'services':services,'paths':paths,'endpaths':endpaths}
json={'process':process,'source':source,'essources':essources,'esproducers':esproducers,'esprefers':esprefers,'services':services,'paths':paths,'endpaths':endpaths}
return repr(json)
def export(self,data,filename,filetype):
if not data.process():
raise "JSONExport requires a cms.Process object"
json = self.produce(data)
if filetype=='json':
jsonfile = open(filename,'w')
jsonfile.write(json)
jsonfile.close()
if filetype=='html':
#open the HTML template and inject the JSON...
pass
|
[
"FWCore.GuiBrowsers.FileExportPlugin.FileExportPlugin.__init__",
"FWCore.ParameterSet.Modules.parameters_"
] |
[((589, 620), 'FWCore.GuiBrowsers.FileExportPlugin.FileExportPlugin.__init__', 'FileExportPlugin.__init__', (['self'], {}), '(self)\n', (614, 620), False, 'from FWCore.GuiBrowsers.FileExportPlugin import FileExportPlugin\n'), ((1963, 1980), 'FWCore.ParameterSet.Modules.parameters_', 'mod.parameters_', ([], {}), '()\n', (1978, 1980), True, 'import FWCore.ParameterSet.Modules as mod\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
@register_criterion('label_smoothed_cross_entropy_with_regularization')
class LabelSmoothedCrossEntropyCriterionWithRegularization(LabelSmoothedCrossEntropyCriterion):
def __init__(self, task, sentence_avg, label_smoothing, regularization_weight):
super().__init__(task, sentence_avg, label_smoothing)
self.regularization_weight = regularization_weight
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
LabelSmoothedCrossEntropyCriterion.add_args(parser)
parser.add_argument('--regularization_weight', default=1.0, type=float, metavar='D',
help='weight for the regularization loss')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
if 'primary' not in sample or 'secondary' not in sample:
return super().forward(model, sample, reduce=reduce)
primary_net_output = model(**sample['primary']['net_input'])
primary_loss, primary_nll_loss = self.compute_loss(model, primary_net_output, sample['primary'], reduce=reduce)
primary_sample_size = sample['primary']['target'].size(0) if self.sentence_avg else sample['primary']['ntokens']
secondary_net_output = model(**sample['secondary']['net_input'])
secondary_loss, secondary_nll_loss = self.compute_loss(model, secondary_net_output, sample['secondary'], reduce=reduce)
secondary_sample_size = sample['secondary']['target'].size(0) if self.sentence_avg else sample['secondary']['ntokens']
primary_targets = model.get_targets(sample['primary'], primary_net_output).unsqueeze(-1)
secondary_targets = model.get_targets(sample['secondary'], secondary_net_output).unsqueeze(-1)
pad_mask = primary_targets.eq(self.padding_idx) | secondary_targets.eq(self.padding_idx)
regularization_loss = self.compute_regularization_loss(model, primary_net_output, secondary_net_output, pad_mask=pad_mask, reduce=reduce)
loss = primary_loss + secondary_loss + self.regularization_weight * regularization_loss
nll_loss = primary_nll_loss + secondary_nll_loss
ntokens = sample['primary']['ntokens'] + sample['secondary']['ntokens']
nsentences = sample['primary']['target'].size(0) + sample['secondary']['target'].size(0)
sample_size = primary_sample_size + secondary_sample_size
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'regularization_loss': utils.item(regularization_loss.data) if reduce else regularization_loss.data,
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_regularization_loss(self, model, primary_net_output, secondary_net_output, pad_mask=None, reduce=True):
mean_net_output = (primary_net_output[0] + secondary_net_output[0]) / 2
m = model.get_normalized_probs((mean_net_output,), log_probs=False)
p = model.get_normalized_probs(primary_net_output, log_probs=True)
q = model.get_normalized_probs(secondary_net_output, log_probs=True)
primary_loss = F.kl_div(p, m, reduction='none')
secondary_loss = F.kl_div(q, m, reduction='none')
if pad_mask is not None:
primary_loss.masked_fill_(pad_mask, 0.)
secondary_loss.masked_fill_(pad_mask, 0.)
if reduce:
primary_loss = primary_loss.sum()
secondary_loss = secondary_loss.sum()
loss = (primary_loss + secondary_loss) / 2
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))
nll_loss_sum = utils.item(sum(log.get('nll_loss', 0) for log in logging_outputs))
regularization_loss_sum = utils.item(sum(log.get('regularization_loss', 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get('ntokens', 0) for log in logging_outputs))
sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
metrics.log_scalar('nll_loss', nll_loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_scalar('regularization_loss', regularization_loss_sum / sample_size, sample_size, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
|
[
"fairseq.utils.get_perplexity",
"fairseq.utils.item",
"torch.nn.functional.kl_div",
"math.log",
"fairseq.criterions.register_criterion",
"fairseq.metrics.log_scalar"
] |
[((390, 460), 'fairseq.criterions.register_criterion', 'register_criterion', (['"""label_smoothed_cross_entropy_with_regularization"""'], {}), "('label_smoothed_cross_entropy_with_regularization')\n", (408, 460), False, 'from fairseq.criterions import register_criterion\n'), ((3928, 3960), 'torch.nn.functional.kl_div', 'F.kl_div', (['p', 'm'], {'reduction': '"""none"""'}), "(p, m, reduction='none')\n", (3936, 3960), True, 'import torch.nn.functional as F\n'), ((3986, 4018), 'torch.nn.functional.kl_div', 'F.kl_div', (['q', 'm'], {'reduction': '"""none"""'}), "(q, m, reduction='none')\n", (3994, 4018), True, 'import torch.nn.functional as F\n'), ((5142, 5248), 'fairseq.metrics.log_scalar', 'metrics.log_scalar', (['"""regularization_loss"""', '(regularization_loss_sum / sample_size)', 'sample_size'], {'round': '(3)'}), "('regularization_loss', regularization_loss_sum /\n sample_size, sample_size, round=3)\n", (5160, 5248), False, 'from fairseq import metrics, utils\n'), ((3064, 3085), 'fairseq.utils.item', 'utils.item', (['loss.data'], {}), '(loss.data)\n', (3074, 3085), False, 'from fairseq import metrics, utils\n'), ((3136, 3161), 'fairseq.utils.item', 'utils.item', (['nll_loss.data'], {}), '(nll_loss.data)\n', (3146, 3161), False, 'from fairseq import metrics, utils\n'), ((3227, 3263), 'fairseq.utils.item', 'utils.item', (['regularization_loss.data'], {}), '(regularization_loss.data)\n', (3237, 3263), False, 'from fairseq import metrics, utils\n'), ((5004, 5015), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5012, 5015), False, 'import math\n'), ((5103, 5114), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (5111, 5114), False, 'import math\n'), ((5295, 5339), 'fairseq.utils.get_perplexity', 'utils.get_perplexity', (["meters['nll_loss'].avg"], {}), "(meters['nll_loss'].avg)\n", (5315, 5339), False, 'from fairseq import metrics, utils\n')]
|
import sys
import os
import csv
import decimal
import pytest
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import votelib.candidate
import votelib.convert
import votelib.evaluate.threshold
import votelib.evaluate.proportional
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
@pytest.fixture(scope='module')
def sk_nr_2020_data():
fpath = os.path.join(DATA_DIR, 'sk_nr_2020.csv')
with open(fpath, encoding='utf8') as infile:
rows = list(csv.reader(infile, delimiter=';'))
party_names, coalflags, votes, seats = [list(x) for x in zip(*rows)]
parties = [
votelib.candidate.Coalition(name=name, parties=[
votelib.candidate.PoliticalParty(pname)
for pname in name.split('-')
])
if int(coalflag) else votelib.candidate.PoliticalParty(name)
for name, coalflag in zip(party_names, coalflags)
]
return dict(zip(parties, [int(v) for v in votes])), {
party: int(n_seats)
for party, n_seats in zip(parties, seats) if int(n_seats) > 0
}
def get_sk_nr_evaluator():
standard_elim = votelib.evaluate.threshold.RelativeThreshold(
decimal.Decimal('.05'), accept_equal=True
)
mem_2_3_elim = votelib.evaluate.threshold.RelativeThreshold(
decimal.Decimal('.07'), accept_equal=True
)
mem_4plus_elim = votelib.evaluate.threshold.RelativeThreshold(
decimal.Decimal('.1'), accept_equal=True
)
eliminator = votelib.evaluate.threshold.CoalitionMemberBracketer(
{1: standard_elim, 2: mem_2_3_elim, 3: mem_2_3_elim},
mem_4plus_elim
)
# main evaluator
evaluator = votelib.evaluate.proportional.LargestRemainder(
'hagenbach_bischoff_rounded'
)
# TODO: missing provisions for tie handling and low amount of candidates
return votelib.evaluate.core.Conditioned(eliminator, evaluator)
def test_sk_nr_2020(sk_nr_2020_data):
votes, results = sk_nr_2020_data
nominator = votelib.candidate.PartyNominator()
for cand in votes.keys():
nominator.validate(cand)
assert get_sk_nr_evaluator().evaluate(votes, 150) == results
CZ_EP_EVALUATOR = votelib.evaluate.core.FixedSeatCount(
votelib.evaluate.core.Conditioned(
votelib.evaluate.threshold.RelativeThreshold(
decimal.Decimal('.05'), accept_equal=True
),
votelib.evaluate.proportional.HighestAverages('d_hondt')
),
21
)
def test_cz_ep_2019():
votes = {
'Klub angažovaných nestraníků': 2580,
'Strana nezávislosti ČR': 9676,
'Cesta odpovědné společnosti': 7890,
'Národní socialisté': 1312,
'Občanská demokratická strana': 344885,
'ANO, vytrollíme europarlament': 37046,
'Česká strana sociálně demokratická': 93664,
'Romská demokratická strana': 1651,
'KSČM': 164624,
'Koalice DSSS a NF': 4363,
'SPR-RSČ': 4284,
'<NAME>, ND': 18715,
'Pravý Blok': 4752,
'NE-VOLIM.CZ': 2221,
'Pro Česko': 2760,
'Vědci pro Českou republiku': 19492,
'Koalice ČSNS, Patrioti ČR': 1289,
'JSI PRO?Jist.Solid.In.pro bud.': 836,
'PRO Zdraví a Sport': 7868,
'Moravské zemské hnutí': 3195,
'Česká Suverenita': 2609,
'TVŮJ KANDIDÁT': 1653,
'HLAS': 56449,
'<NAME>, RČ': 15492,
'<NAME>AN, TOP 09': 276220,
'Česká pirátská strana': 330844,
'Svoboda a přímá demokracie': 216718,
'Aliance národních sil': 1971,
'ANO 2011': 502343,
'Agrární demokratická strana': 4004,
'Moravané': 6599,
'První Republika': 844,
'Demokratická strana zelených': 14339,
'Bezpečnost,Odpovědnost,Solid.': 2583,
'<NAME>kromníci, NEZ': 8720,
'Evropa společně': 12587,
'Konzervativní Alternativa': 235,
'KDU-ČSL': 171723,
'Alternativa pro Česk. rep.2017': 11729,
}
results = {
'ANO 2011': 6,
'Občanská demokratická strana': 4,
'Česká pirátská strana': 3,
'Koalice STAN, TOP 09': 3,
'Svoboda a přímá demokracie': 2,
'KDU-ČSL': 2,
'KSČM': 1,
}
assert CZ_EP_EVALUATOR.evaluate(votes) == results
CZ_PSP_EVALUATOR = votelib.evaluate.core.ByConstituency(
votelib.evaluate.proportional.HighestAverages('d_hondt'),
votelib.evaluate.proportional.LargestRemainder('hare'),
preselector=votelib.evaluate.threshold.RelativeThreshold(
decimal.Decimal('.05'), accept_equal=True
)
)
@pytest.fixture(scope='module')
def cz_psp_2017_votes():
fpath = os.path.join(DATA_DIR, 'cz_psp_2017.csv')
with open(fpath, encoding='utf8') as infile:
rows = list(csv.reader(infile, delimiter=';'))
region_names = rows[0][1:]
votes = {region: {} for region in region_names}
for row in rows[1:]:
party = row[0]
for regname, n_votes in zip(region_names, row[1:]):
votes[regname][party] = int(n_votes)
return votes
def test_cz_psp_2017(cz_psp_2017_votes):
reg_results = CZ_PSP_EVALUATOR.evaluate(cz_psp_2017_votes, 200)
nat_agg = votelib.convert.VoteTotals()
assert nat_agg.convert(reg_results) == {
'ANO': 78,
'ODS': 25,
'Piráti': 22,
'SPD': 22,
'ČSSD': 15,
'KSČM': 15,
'KDU-ČSL': 10,
'TOP 09': 7,
'STAN': 6,
}
assert reg_results['Hlavní město Praha'] == {
'ANO': 6,
'ODS': 5,
'Piráti': 5,
'SPD': 1,
'ČSSD': 1,
'KSČM': 1,
'KDU-ČSL': 1,
'TOP 09': 3,
'STAN': 1,
}
assert reg_results['Karlovarský kraj'] == {
'ANO': 3,
'Piráti': 1,
'SPD': 1,
}
def get_evaluators():
return [
CZ_EP_EVALUATOR,
CZ_PSP_EVALUATOR,
get_sk_nr_evaluator(),
]
|
[
"csv.reader",
"decimal.Decimal",
"os.path.dirname",
"pytest.fixture",
"os.path.join"
] |
[((314, 344), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (328, 344), False, 'import pytest\n'), ((4559, 4589), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (4573, 4589), False, 'import pytest\n'), ((277, 302), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((380, 420), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""sk_nr_2020.csv"""'], {}), "(DATA_DIR, 'sk_nr_2020.csv')\n", (392, 420), False, 'import os\n'), ((4627, 4668), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""cz_psp_2017.csv"""'], {}), "(DATA_DIR, 'cz_psp_2017.csv')\n", (4639, 4668), False, 'import os\n'), ((93, 118), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'import os\n'), ((1173, 1195), 'decimal.Decimal', 'decimal.Decimal', (['""".05"""'], {}), "('.05')\n", (1188, 1195), False, 'import decimal\n'), ((1294, 1316), 'decimal.Decimal', 'decimal.Decimal', (['""".07"""'], {}), "('.07')\n", (1309, 1316), False, 'import decimal\n'), ((1417, 1438), 'decimal.Decimal', 'decimal.Decimal', (['""".1"""'], {}), "('.1')\n", (1432, 1438), False, 'import decimal\n'), ((490, 523), 'csv.reader', 'csv.reader', (['infile'], {'delimiter': '""";"""'}), "(infile, delimiter=';')\n", (500, 523), False, 'import csv\n'), ((2321, 2343), 'decimal.Decimal', 'decimal.Decimal', (['""".05"""'], {}), "('.05')\n", (2336, 2343), False, 'import decimal\n'), ((4506, 4528), 'decimal.Decimal', 'decimal.Decimal', (['""".05"""'], {}), "('.05')\n", (4521, 4528), False, 'import decimal\n'), ((4738, 4771), 'csv.reader', 'csv.reader', (['infile'], {'delimiter': '""";"""'}), "(infile, delimiter=';')\n", (4748, 4771), False, 'import csv\n')]
|
import os
import requests
from shapely.geometry import Point
import geopandas as gpd
def geo_code(address, city):
"""
Geo code address sing open maps API
Parameters
------------
address: str
Address as clear as possible, better to check first if it can be found in open street search engine
city: str
Name of the city
Returns
---------
results: dict
dictionary with latitude, longitud and state name information
"""
parameters = {'key': os.environ.get("CON_KEY"),
'location': '{0:s}, {1:s}, Brazil'.format(address, city),
'thumbMaps': False,
'maxResults': 1
}
response = requests.get('http://www.mapquestapi.com/geocoding/v1/address', params=parameters)
assert response.status_code==200, 'Review address or internet connection'
results = response.json()['results'][0]['locations'][0]['latLng']
results['state_name'] = response.json()['results'][0]['locations'][0]['adminArea3']
results['street_name'] = response.json()['results'][0]['locations'][0]['street']
assert results['lat']!=39.78373, 'Review address or internet connection'
return results
def convert_geo_to_sector_code(geo_code_output, states_dict, path_to_shapes):
"""
Conver latitud, longitud and state reference to sector code
Parameters
------------
geo_code_output: dict
output of geo_code function
states_dict: dict
correspondence of states names
path_to_shapes: str
path to folder containing shapes
Returns
---------
sector code: str
"""
coordinate_point = Point(geo_code_output['lng'], geo_code_output['lat'])
state_in_response = geo_code_output['state_name']
state_name = states_dict[state_in_response]
assert state_name in os.listdir(path_to_shapes), 'There is no shape available to reference this address'
file_name = [file for file in os.listdir(path_to_shapes+'/'+state_name) if file.find('.shp')>0][0]
census_sector = gpd.read_file(path_to_shapes+'/{0:s}/{1:s}'.format(state_name, file_name), encoding='latin1')
sector_code = census_sector.loc[census_sector.contains(coordinate_point), 'CD_GEOCODI'].values[0]
return sector_code
def flat_cell(cell):
"""
flat dictionarys in celss
"""
if isinstance(cell, dict):
value_cell = list(cell.values())[0]
else:
value_cell = cell
return value_cell
|
[
"os.environ.get",
"shapely.geometry.Point",
"os.listdir",
"requests.get"
] |
[((749, 836), 'requests.get', 'requests.get', (['"""http://www.mapquestapi.com/geocoding/v1/address"""'], {'params': 'parameters'}), "('http://www.mapquestapi.com/geocoding/v1/address', params=\n parameters)\n", (761, 836), False, 'import requests\n'), ((1714, 1767), 'shapely.geometry.Point', 'Point', (["geo_code_output['lng']", "geo_code_output['lat']"], {}), "(geo_code_output['lng'], geo_code_output['lat'])\n", (1719, 1767), False, 'from shapely.geometry import Point\n'), ((535, 560), 'os.environ.get', 'os.environ.get', (['"""CON_KEY"""'], {}), "('CON_KEY')\n", (549, 560), False, 'import os\n'), ((1905, 1931), 'os.listdir', 'os.listdir', (['path_to_shapes'], {}), '(path_to_shapes)\n', (1915, 1931), False, 'import os\n'), ((2028, 2073), 'os.listdir', 'os.listdir', (["(path_to_shapes + '/' + state_name)"], {}), "(path_to_shapes + '/' + state_name)\n", (2038, 2073), False, 'import os\n')]
|
class Url(object):
def __init__(self, url, title, ref_num, depth):
self.url = url
self.title = title
self.ref_num = ref_num
self.depth = depth
def __lt__(self, other):
return self.ref_num > other.ref_num
def __gt__(self, other):
return self.ref_num < other.ref_num
def __eq__(self, other):
return self.ref_num == other.ref_num
class Paper(Url):
def __init__(self, url, title, ref_num, abstract, depth=-1):
super(Paper, self).__init__(url, title, ref_num, depth)
self.abstract = abstract
class Url_pool(object):
def __init__(self):
from heapq import heapify
self.url_his = set()
self.title_his = set()
self.urls = []
heapify(self.urls)
def append_url(self, url):
from heapq import heappush
import re
pun = "[\s+\.\!\/_,$%^*(+\"\']+|[+——!:‐-,。?、~@#¥%……&*()]+"
if re.sub(pun, "", url.title) in self.title_his:
pass
elif url.url in self.url_his:
pass
else:
self.url_his.add(url.url)
self.title_his.add(re.sub(pun, "", url.title))
heappush(self.urls, url)
def get_url(self):
from heapq import heappop
if len(self.urls) > 0:
return heappop(self.urls)
else:
return None
class PaperCrawler(object):
def __init__(self, init_url="https://xueshu.baidu.com/usercenter/paper/show?paperid=3821a90f58762386e257eb4e6fa11f79",
basic_url="https://xueshu.baidu.com", max_depth=5, tot_papers=10, wait_time=2):
self.init_url = init_url
self.basic_url = basic_url
self.max_depth = max_depth
self.tot_papers = tot_papers
self.wait_time = wait_time
self.url_pool = Url_pool()
self.papers = []
def crawl(self):
cur_depth = 0
self.papers.append(self.parse_url(self.init_url, cur_depth))
while len(self.papers) < self.tot_papers:
url = self.url_pool.get_url()
cur_depth = url.depth
self.papers.append(self.parse_url(url.url, cur_depth))
self.store()
def parse_url(self, url, depth):
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument('--headless')
options.add_experimental_option('excludeSwitches', ['enable-logging'])
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(self.wait_time)
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
main_info = soup.find(name='div', attrs={"class":"main-info"})
title = main_info.find(name='h3').text.strip()
print(f"Crawling {len(self.papers)+1}/{self.tot_papers}----------Title: {title}")
try:
abstract = main_info.find(name='p', attrs={"class":"abstract"}).text.strip()
except Exception as e:
abstract = "No Abstract"
ref_num = main_info.find(name='p', attrs={"class":"ref-wr-num"}).text.strip()
if ref_num.endswith("万"):
ref_num = int(float(ref_num[:-1])*10000)
else:
ref_num = int(ref_num)
paper = Paper(url, title, ref_num, abstract)
rel_lists = soup.find(name='ul', attrs={"class":"related_lists"})
if rel_lists and depth < self.max_depth:
rel_urls = rel_lists.find_all(name='li')
for rel_url in rel_urls:
url = self.basic_url + rel_url.find(name='p', attrs={"class":"rel_title"}).find(name="a").get('href')
title = rel_url.find(name='p', attrs={"class":"rel_title"}).find(name="a").text.strip()
try:
ref_num = rel_url.find(name='div', attrs={"class":"sc_info"}).find(name="a").text.strip()
if ref_num.endswith("万"):
ref_num = int(float(ref_num[:-1])*10000)
else:
ref_num = int(ref_num)
except Exception as e:
ref_num = 0
self.url_pool.append_url(Url(url, title, ref_num, depth+1))
driver.quit()
return paper
def store(self, filename='result.txt', encoding='utf-8'):
self.papers.sort()
with open(filename, 'w', errors="ignore") as f:
for paper in self.papers:
f.write(f"Title: {paper.title}\n")
f.write(f"Abstract: {paper.abstract}\n")
f.write(f"Ref_num: {paper.ref_num}\n")
f.write(f"URL: {paper.url}\n")
f.write("\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--max-depth", type=int, default=5, help="max_depth")
parser.add_argument("-t", "--tot-papers", type=int, default=10, help="tot_papers")
parser.add_argument("-w", "--wait-time", type=int, default=2, help="wait_time")
parser.add_argument("-i", "--init-url", type=str, default="https://xueshu.baidu.com/usercenter/paper/show?paperid=3821a90f58762386e257eb4e6fa11f79"
, help="init_url")
args = parser.parse_args()
crawler = PaperCrawler(init_url=args.init_url, max_depth=args.max_depth, tot_papers=args.tot_papers, wait_time=args.wait_time)
crawler.crawl()
|
[
"selenium.webdriver.chrome.options.Options",
"argparse.ArgumentParser",
"heapq.heapify",
"heapq.heappush",
"heapq.heappop",
"selenium.webdriver.Chrome",
"bs4.BeautifulSoup",
"re.sub"
] |
[((4906, 4931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4929, 4931), False, 'import argparse\n'), ((791, 809), 'heapq.heapify', 'heapify', (['self.urls'], {}), '(self.urls)\n', (798, 809), False, 'from heapq import heapify\n'), ((2450, 2459), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (2457, 2459), False, 'from selenium.webdriver.chrome.options import Options\n'), ((2600, 2633), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (2616, 2633), False, 'from selenium import webdriver\n'), ((2720, 2768), 'bs4.BeautifulSoup', 'BeautifulSoup', (['driver.page_source', '"""html.parser"""'], {}), "(driver.page_source, 'html.parser')\n", (2733, 2768), False, 'from bs4 import BeautifulSoup\n'), ((986, 1012), 're.sub', 're.sub', (['pun', '""""""', 'url.title'], {}), "(pun, '', url.title)\n", (992, 1012), False, 'import re\n'), ((1364, 1382), 'heapq.heappop', 'heappop', (['self.urls'], {}), '(self.urls)\n', (1371, 1382), False, 'from heapq import heappop\n'), ((1227, 1251), 'heapq.heappush', 'heappush', (['self.urls', 'url'], {}), '(self.urls, url)\n', (1235, 1251), False, 'from heapq import heappush\n'), ((1187, 1213), 're.sub', 're.sub', (['pun', '""""""', 'url.title'], {}), "(pun, '', url.title)\n", (1193, 1213), False, 'import re\n')]
|
from pathlib import Path
import click
from flask import current_app as app
from flask.cli import AppGroup, with_appcontext
blueprints_cli = AppGroup(
"blueprints", short_help="Creation and listing of blueprints."
)
@blueprints_cli.command("create")
@click.argument("name")
@click.option(
"-f",
"--full",
default=False,
show_default=True,
type=bool,
help="Whether the blueprint creation should be minimal",
)
@with_appcontext
def create_bp(name, full):
"""Creates a blueprint with the specified name"""
directory = Path(f"{app.config['BASE_DIR']}/src/{name}")
if not directory.exists():
directory.mkdir(parents=True, exist_ok=True)
click.echo("Created blueprint in {}".format(directory))
init_file = Path(f"{directory}/__init__.py")
with open(init_file, "a") as f:
if full:
lines = [
"from flask import Blueprint \n\n",
f"{name}_bp = Blueprint('{name}',__name__, template_folder='templates', static_folder='static', static_url_path='/static/{name}') \n\n",
"from . import views",
]
f.writelines(lines)
else:
lines = [
"from flask import Blueprint \n\n",
f"{name}_bp = Blueprint('{name}',__name__) \n\n",
"from . import views",
]
f.writelines(lines)
click.echo("Created __init__.py in {}".format(init_file))
if full:
templates_directory = Path(f"{directory}/templates/{name}")
templates_directory.mkdir(parents=True, exist_ok=True)
click.echo("Created templates directory in {}".format(templates_directory))
static_directory = Path(f"{directory}/static")
static_directory.mkdir()
click.echo("Created static directory in {}".format(static_directory))
views_file = Path(f"{directory}/views.py")
with open(views_file, "a") as f:
lines = [f"from . import {name}_bp"]
f.writelines(lines)
click.echo("Created views.py.py in {}".format(views_file))
else:
click.echo("Blueprint/directory exists already", err=True)
@blueprints_cli.command("list")
@with_appcontext
def list():
"""List registered blueprints in Flask app."""
bps = [_ for _ in app.blueprints.keys()]
click.echo(bps)
@blueprints_cli.command("delete")
@click.argument("name")
@with_appcontext
def delete(name):
"""Deletes a blueprint folder"""
directory = Path(f"{app.config['BASE_DIR']}/src/{name}")
if directory.exists():
rmdir_recursive(directory)
click.echo(f"Blueprint deleted in {directory}!")
else:
click.echo("Directory does not exist!", err=True)
def rmdir_recursive(directory):
for i in directory.iterdir():
if i.is_dir():
rmdir_recursive(i)
else:
i.unlink()
directory.rmdir()
|
[
"flask.current_app.blueprints.keys",
"click.argument",
"click.option",
"click.echo",
"pathlib.Path",
"flask.cli.AppGroup"
] |
[((142, 214), 'flask.cli.AppGroup', 'AppGroup', (['"""blueprints"""'], {'short_help': '"""Creation and listing of blueprints."""'}), "('blueprints', short_help='Creation and listing of blueprints.')\n", (150, 214), False, 'from flask.cli import AppGroup, with_appcontext\n'), ((258, 280), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (272, 280), False, 'import click\n'), ((282, 416), 'click.option', 'click.option', (['"""-f"""', '"""--full"""'], {'default': '(False)', 'show_default': '(True)', 'type': 'bool', 'help': '"""Whether the blueprint creation should be minimal"""'}), "('-f', '--full', default=False, show_default=True, type=bool,\n help='Whether the blueprint creation should be minimal')\n", (294, 416), False, 'import click\n'), ((2499, 2521), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (2513, 2521), False, 'import click\n'), ((555, 599), 'pathlib.Path', 'Path', (['f"""{app.config[\'BASE_DIR\']}/src/{name}"""'], {}), '(f"{app.config[\'BASE_DIR\']}/src/{name}")\n', (559, 599), False, 'from pathlib import Path\n'), ((2446, 2461), 'click.echo', 'click.echo', (['bps'], {}), '(bps)\n', (2456, 2461), False, 'import click\n'), ((2610, 2654), 'pathlib.Path', 'Path', (['f"""{app.config[\'BASE_DIR\']}/src/{name}"""'], {}), '(f"{app.config[\'BASE_DIR\']}/src/{name}")\n', (2614, 2654), False, 'from pathlib import Path\n'), ((769, 801), 'pathlib.Path', 'Path', (['f"""{directory}/__init__.py"""'], {}), "(f'{directory}/__init__.py')\n", (773, 801), False, 'from pathlib import Path\n'), ((1982, 2011), 'pathlib.Path', 'Path', (['f"""{directory}/views.py"""'], {}), "(f'{directory}/views.py')\n", (1986, 2011), False, 'from pathlib import Path\n'), ((2224, 2282), 'click.echo', 'click.echo', (['"""Blueprint/directory exists already"""'], {'err': '(True)'}), "('Blueprint/directory exists already', err=True)\n", (2234, 2282), False, 'import click\n'), ((2725, 2773), 'click.echo', 'click.echo', (['f"""Blueprint deleted in {directory}!"""'], {}), "(f'Blueprint deleted in {directory}!')\n", (2735, 2773), False, 'import click\n'), ((2792, 2841), 'click.echo', 'click.echo', (['"""Directory does not exist!"""'], {'err': '(True)'}), "('Directory does not exist!', err=True)\n", (2802, 2841), False, 'import click\n'), ((1588, 1625), 'pathlib.Path', 'Path', (['f"""{directory}/templates/{name}"""'], {}), "(f'{directory}/templates/{name}')\n", (1592, 1625), False, 'from pathlib import Path\n'), ((1813, 1840), 'pathlib.Path', 'Path', (['f"""{directory}/static"""'], {}), "(f'{directory}/static')\n", (1817, 1840), False, 'from pathlib import Path\n'), ((2419, 2440), 'flask.current_app.blueprints.keys', 'app.blueprints.keys', ([], {}), '()\n', (2438, 2440), True, 'from flask import current_app as app\n')]
|
import socket
import threading
import time
class Tello(object):
"""
Wrapper class to interact with the Tello drone.
"""
def __init__(self, local_ip, local_port, imperial=False,
command_timeout=.3,
tello_ip='192.168.10.1',
tello_port=8889):
"""
Binds to the local IP/port and puts the Tello into command mode.
:param local_ip: Local IP address to bind.
:param local_port: Local port to bind.
:param imperial: If True, speed is MPH and distance is feet.
If False, speed is KPH and distance is meters.
:param command_timeout: Number of seconds to wait for a response to a command.
:param tello_ip: Tello IP.
:param tello_port: Tello port.
"""
self.abort_flag = False
self.command_timeout = command_timeout
self.imperial = imperial
self.response = None
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.tello_address = (tello_ip, tello_port)
self.last_height = 0
self.socket.bind((local_ip, local_port))
# thread for receiving cmd ack
self.receive_thread = threading.Thread(target=self._receive_thread)
self.receive_thread.daemon = True
self.receive_thread.start()
self.socket.sendto(b'command', self.tello_address)
print ('sent: command')
def __del__(self):
"""
Closes the local socket.
:return: None.
"""
self.socket.close()
def _receive_thread(self):
"""
Listen to responses from the Tello.
Runs as a thread, sets self.response to whatever the Tello last returned.
:return: None.
"""
while True:
try:
self.response, _ = self.socket.recvfrom(3000)
except socket.error as exc:
print(f'Caught exception socket.error : {exc}')
def send_command(self, command):
"""
Send a command to the Tello and wait for a response.
:param command: Command to send.
:return: Response from Tello.
"""
print(f'>> send cmd: {command}')
self.abort_flag = False
timer = threading.Timer(self.command_timeout, self.set_abort_flag)
self.socket.sendto(command.encode('utf-8'), self.tello_address)
timer.start()
while self.response is None:
if self.abort_flag is True:
break
timer.cancel()
if self.response is None:
response = 'none_response'
else:
response = self.response.decode('utf-8')
self.response = None
return response
def set_abort_flag(self):
"""
Sets self.abort_flag to True.
Used by the timer in Tello.send_command() to indicate to that a response
timeout has occurred.
:return: None.
"""
self.abort_flag = True
def takeoff(self):
"""
Initiates take-off.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('takeoff')
def set_speed(self, speed):
"""
Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
:param speed: Speed.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command(f'speed {speed}')
def rotate_cw(self, degrees):
"""
Rotates clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return:Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'cw {degrees}')
def rotate_ccw(self, degrees):
"""
Rotates counter-clockwise.
:param degrees: Degrees to rotate, 1 to 360.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'ccw {degrees}')
def flip(self, direction):
"""
Flips.
:param direction: Direction to flip, 'l', 'r', 'f', 'b'.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command(f'flip {direction}')
def get_response(self):
"""
Returns response of tello.
:return: Response of tello.
"""
response = self.response
return response
def get_height(self):
"""
Returns height(dm) of tello.
:return: Height(dm) of tello.
"""
height = self.send_command('height?')
height = str(height)
height = filter(str.isdigit, height)
try:
height = int(height)
self.last_height = height
except:
height = self.last_height
pass
return height
def get_battery(self):
"""
Returns percent battery life remaining.
:return: Percent battery life remaining.
"""
battery = self.send_command('battery?')
try:
battery = int(battery)
except:
pass
return battery
def get_flight_time(self):
"""
Returns the number of seconds elapsed during flight.
:return: Seconds elapsed during flight.
"""
flight_time = self.send_command('time?')
try:
flight_time = int(flight_time)
except:
pass
return flight_time
def get_speed(self):
"""
Returns the current speed.
:return: Current speed in KPH or MPH.
"""
speed = self.send_command('speed?')
try:
speed = float(speed)
if self.imperial is True:
speed = round((speed / 44.704), 1)
else:
speed = round((speed / 27.7778), 1)
except:
pass
return speed
def land(self):
"""
Initiates landing.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('land')
def move(self, direction, distance):
"""
Moves in a direction for a distance.
This method expects meters or feet. The Tello API expects distances
from 20 to 500 centimeters.
Metric: .02 to 5 meters
Imperial: .7 to 16.4 feet
:param direction: Direction to move, 'forward', 'back', 'right' or 'left'.
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
distance = float(distance)
if self.imperial is True:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command(f'{direction} {distance}')
def move_backward(self, distance):
"""
Moves backward for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('back', distance)
def move_down(self, distance):
"""
Moves down for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('down', distance)
def move_forward(self, distance):
"""
Moves forward for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('forward', distance)
def move_left(self, distance):
"""
Moves left for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('left', distance)
def move_right(self, distance):
"""
Moves right for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('right', distance)
def move_up(self, distance):
"""
Moves up for a distance.
See comments for Tello.move().
:param distance: Distance to move.
:return: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('up', distance)
|
[
"threading.Thread",
"threading.Timer",
"socket.socket"
] |
[((975, 1023), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (988, 1023), False, 'import socket\n'), ((1224, 1269), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._receive_thread'}), '(target=self._receive_thread)\n', (1240, 1269), False, 'import threading\n'), ((2275, 2333), 'threading.Timer', 'threading.Timer', (['self.command_timeout', 'self.set_abort_flag'], {}), '(self.command_timeout, self.set_abort_flag)\n', (2290, 2333), False, 'import threading\n')]
|
from pymongo import MongoClient
from user import User
import json
class Database:
def __init__(self):
self.client = MongoClient(
'localhost', 27017, username="root", password="<PASSWORD>")
self.db = self.client.test_database
self.users = self.db.users
self.settings = self.db.settings
def get_user(self, name):
data = self.users.find_one({"name": name})
if (data):
return User(name=data["name"], balance=data["balance"])
else:
return None
def get_all_users(self):
pass
def create_user(self, user):
if not self.user_exsists(user):
create_id = self.users.insert_one(user.__dict__).inserted_id
return create_id
print("User already exsists")
return
def delete_user(self, name):
self.users.delete_one({"name": name})
def update_balance(self, user):
self.users.find_one_and_update(
{"name": user.name}, {"$set": {"balance": user.balance}})
def increase_balance(self, user, amount):
user.balance += amount
self.update_balance(user)
def decrease_balance(self, user, amount):
user.balance -= amount
self.update_balance(user)
def user_exsists(self, user):
if list(self.users.find({"name": user.name})):
return True
else:
return False
def get_settings(self):
return self.settings.find_one({})
def update_settings(self, settings):
items = {}
for item in settings.items:
items[item.name] = item.price
self.settings.find_one_and_update({}, {"$set": items})
def create_settings(self, settings):
items = {}
for item in settings.items:
items[item.name] = item.price
self.settings.insert_one(items)
|
[
"pymongo.MongoClient",
"user.User"
] |
[((130, 201), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {'username': '"""root"""', 'password': '"""<PASSWORD>"""'}), "('localhost', 27017, username='root', password='<PASSWORD>')\n", (141, 201), False, 'from pymongo import MongoClient\n'), ((455, 503), 'user.User', 'User', ([], {'name': "data['name']", 'balance': "data['balance']"}), "(name=data['name'], balance=data['balance'])\n", (459, 503), False, 'from user import User\n')]
|
import argparse
import multiprocessing
from multiprocessing.queues import Queue
from queue import Empty
from PIL.Image import Image
from src.base import IO
def repeat(queue: Queue, resize):
try:
while True:
load_resize_save(queue.get(True, 5), resize)
except Empty:
return
def load_resize_save(image_file, resize):
img: Image = IO.load_image(image_file)
img = img.resize(resize)
img.save(image_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--source', type=str)
parser.add_argument('--target_size', type=float)
parser.add_argument('--worker', type=int)
parser.add_argument('--image_glob', type=str, default='**/*.jpg')
args = parser.parse_args()
images = IO.get_image_paths(args.source, args.image_glob)
resize = IO.load_image(images[0]).size
resize = tuple(int(args.target_size * el) for el in resize)
multiprocessing.set_start_method('spawn')
q = multiprocessing.Queue()
for img_file in images:
q.put(img_file)
processes = [
multiprocessing.Process(target=repeat, args=(q, resize), daemon=True) for w in range(args.worker)
]
for process in processes:
process.start()
for process in processes:
process.join()
|
[
"src.base.IO.get_image_paths",
"argparse.ArgumentParser",
"multiprocessing.set_start_method",
"src.base.IO.load_image",
"multiprocessing.Queue",
"multiprocessing.Process"
] |
[((374, 399), 'src.base.IO.load_image', 'IO.load_image', (['image_file'], {}), '(image_file)\n', (387, 399), False, 'from src.base import IO\n'), ((496, 521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (519, 521), False, 'import argparse\n'), ((783, 831), 'src.base.IO.get_image_paths', 'IO.get_image_paths', (['args.source', 'args.image_glob'], {}), '(args.source, args.image_glob)\n', (801, 831), False, 'from src.base import IO\n'), ((945, 986), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (977, 986), False, 'import multiprocessing\n'), ((995, 1018), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (1016, 1018), False, 'import multiprocessing\n'), ((846, 870), 'src.base.IO.load_image', 'IO.load_image', (['images[0]'], {}), '(images[0])\n', (859, 870), False, 'from src.base import IO\n'), ((1098, 1167), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'repeat', 'args': '(q, resize)', 'daemon': '(True)'}), '(target=repeat, args=(q, resize), daemon=True)\n', (1121, 1167), False, 'import multiprocessing\n')]
|
import numpy as np
import pandas as pd
import time
from pathlib import Path
from experiments.evaluation import calculate_metrics
from causal_estimators.ipw_estimator import IPWEstimator
from causal_estimators.standardization_estimator import \
StandardizationEstimator, StratifiedStandardizationEstimator
from experiments.evaluation import run_model_cv
from loading import load_from_folder
from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier
from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC
from sklearn.kernel_ridge import KernelRidge
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor,\
RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.exceptions import UndefinedMetricWarning
import warnings
warnings.simplefilter(action='ignore', category=UndefinedMetricWarning)
# warnings.filterwarnings("ignore", message="UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 due to no predicted samples. Use `zero_division` parameter to control this behavior.")
RESULTS_DIR = Path('results')
alphas = {'alpha': np.logspace(-4, 5, 10)}
# gammas = [] + ['scale']
Cs = np.logspace(-4, 5, 10)
d_Cs = {'C': Cs}
SVM = 'svm'
d_Cs_pipeline = {SVM + '__C': Cs}
max_depths = list(range(2, 10 + 1)) + [None]
d_max_depths = {'max_depth': max_depths}
d_max_depths_base = {'base_estimator__max_depth': max_depths}
Ks = {'n_neighbors': [1, 2, 3, 5, 10, 15, 25, 50, 100, 200]}
OUTCOME_MODEL_GRID = [
('LinearRegression', LinearRegression(), {}),
('LinearRegression_interact',
make_pipeline(PolynomialFeatures(degree=2, interaction_only=True),
LinearRegression()),
{}),
('LinearRegression_degree2',
make_pipeline(PolynomialFeatures(degree=2), LinearRegression()), {}),
# ('LinearRegression_degree3',
# make_pipeline(PolynomialFeatures(degree=3), LinearRegression()), {}),
('Ridge', Ridge(), alphas),
('Lasso', Lasso(), alphas),
('ElasticNet', ElasticNet(), alphas),
('KernelRidge', KernelRidge(), alphas),
('SVM_rbf', SVR(kernel='rbf'), d_Cs),
('SVM_sigmoid', SVR(kernel='sigmoid'), d_Cs),
('LinearSVM', LinearSVR(), d_Cs),
# (SVR(kernel='linear'), d_Cs), # doesn't seem to work (runs forever)
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='rbf'))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()), (SVM, SVR(kernel='sigmoid'))]),
d_Cs_pipeline),
('Standardized_LinearSVM', Pipeline([('standard', StandardScaler()), (SVM, LinearSVR())]),
d_Cs_pipeline),
('kNN', KNeighborsRegressor(), Ks),
# GaussianProcessRegressor(),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeRegressor(), d_max_depths),
# ('RandomForest', RandomForestRegressor(), d_max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostRegressor(base_estimator=DecisionTreeRegressor(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingRegressor(), d_max_depths),
# MLPRegressor(max_iter=1000),
# MLPRegressor(alpha=1, max_iter=1000),
]
PROP_SCORE_MODEL_GRID = [
('LogisticRegression_l2', LogisticRegression(penalty='l2'), d_Cs),
('LogisticRegression', LogisticRegression(penalty='none'), {}),
('LogisticRegression_l2_liblinear', LogisticRegression(penalty='l2', solver='liblinear'), d_Cs),
('LogisticRegression_l1_liblinear', LogisticRegression(penalty='l1', solver='liblinear'), d_Cs),
('LogisticRegression_l1_saga', LogisticRegression(penalty='l1', solver='saga'), d_Cs),
('LDA', LinearDiscriminantAnalysis(), {}),
('LDA_shrinkage', LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto'), {}),
('QDA', QuadraticDiscriminantAnalysis(), {}),
# TODO: add tuning of SVM gamma, rather than using the default "scale" setting
('SVM_rbf', SVC(kernel='rbf', probability=True), d_Cs),
('SVM_sigmoid', SVC(kernel='sigmoid', probability=True), d_Cs),
# ('SVM_linear', SVC(kernel='linear', probability=True), d_Cs), # doesn't seem to work (runs forever)
# SVMs are sensitive to input scale
('Standardized_SVM_rbf', Pipeline([('standard', StandardScaler()), (SVM, SVC(kernel='rbf', probability=True))]),
d_Cs_pipeline),
('Standardized_SVM_sigmoid', Pipeline([('standard', StandardScaler()),
(SVM, SVC(kernel='sigmoid', probability=True))]),
d_Cs_pipeline),
# ('Standardized_SVM_linear', Pipeline([('standard', StandardScaler()),
# (SVM, SVC(kernel='linear', probability=True))]),
# d_Cs_pipeline), # doesn't seem to work (runs forever)
('kNN', KNeighborsClassifier(), Ks),
# GaussianProcessClassifier(),
('GaussianNB', GaussianNB(), {}),
# TODO: also cross-validate over min_samples_split and min_samples_leaf
('DecisionTree', DecisionTreeClassifier(), d_max_depths),
# ('RandomForest', RandomForestClassifier(), max_depths),
# TODO: also cross-validate over learning_rate
# ('AdaBoost', AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=None)), d_max_depths_base),
# ('GradientBoosting', GradientBoostingClassifier(), d_max_depths),
# MLPClassifier(max_iter=1000),
# MLPClassifier(alpha=1, max_iter=1000),
]
psid_gen_model, args = load_from_folder(dataset='lalonde_psid1')
cps_gen_model, args = load_from_folder(dataset='lalonde_cps1')
twins_gen_model, args = load_from_folder(dataset='twins')
psid_ate = psid_gen_model.ate(noisy=True)
psid_ite = psid_gen_model.ite(noisy=True).squeeze()
cps_ate = cps_gen_model.ate(noisy=True)
cps_ite = cps_gen_model.ite(noisy=True).squeeze()
twins_ate = twins_gen_model.ate(noisy=False)
twins_ite = twins_gen_model.ite(noisy=False).squeeze()
GEN_MODELS = [
('lalonde_psid', psid_gen_model, psid_ate, psid_ite),
('lalonde_cps', cps_gen_model, cps_ate, cps_ite),
('twins', twins_gen_model, twins_ate, twins_ite)
]
t_start = time.time()
N_SEEDS_CV = 5
N_SEEDS_METRICS = 5
def run_experiments_for_estimator(get_estimator_func, model_grid, save_location,
meta_est_name, model_type, exclude=[],
gen_models=GEN_MODELS, n_seeds_cv=N_SEEDS_CV,
n_seeds_metrics=N_SEEDS_METRICS):
# if outcome_model_grid is None and prop_score_model_grid is None:
# raise ValueError('Either outcome_model_grid or prop_score_model_grid must be not None.')
# if outcome_model_grid is not None and prop_score_model_grid is not None:
# raise ValueError('Currently only supporting one non-None model grid.')
# outcome_modeling = outcome_model_grid is not None
# model_grid = outcome_model_grid if outcome_modeling else prop_score_model_grid
# model_type = 'outcome' if outcome_modeling else 'prop_score'
valid_model_types = ['outcome', 'prop_score']
if model_type not in valid_model_types:
raise ValueError('Invalid model_type... Valid model_types: {}'.format(valid_model_types))
param_str = 'params_' + model_type + '_model'
dataset_dfs = []
for gen_name, gen_model, ate, ite in gen_models:
print('DATASET:', gen_name)
dataset_start = time.time()
model_dfs = []
for model_name, model, param_grid in model_grid:
print('MODEL:', model_name)
if (gen_name, model_name) in exclude or model_name in exclude:
print('SKIPPING')
continue
model_start = time.time()
results = run_model_cv(gen_model, model, model_name=model_name, param_grid=param_grid,
n_seeds=n_seeds_cv, model_type=model_type, best_model=False, ret_time=False)
metrics_list = []
for params in results[param_str]:
try:
est_start = time.time()
estimator = get_estimator_func(model.set_params(**params))
metrics = calculate_metrics(gen_model, estimator, n_seeds=n_seeds_metrics,
conf_ints=False, ate=ate, ite=ite)
est_end = time.time()
# Add estimator fitting time in minutes
metrics['time'] = (est_end - est_start) / 60
metrics_list.append(metrics)
except ValueError:
print('Skipping {} params: {}'.format(model_name, params))
causal_metrics = pd.DataFrame(metrics_list)
model_df = pd.concat([results, causal_metrics], axis=1)
model_df.insert(0, 'dataset', gen_name)
model_df.insert(1, 'meta-estimator', meta_est_name)
model_dfs.append(model_df)
model_end = time.time()
print(model_name, 'time:', (model_end - model_start) / 60, 'minutes')
dataset_df = pd.concat(model_dfs, axis=0)
dataset_end = time.time()
print(gen_name, 'time:', (dataset_end - dataset_start) / 60 / 60, 'hours')
dataset_dfs.append(dataset_df)
full_df = pd.concat(dataset_dfs, axis=0)
t_end = time.time()
print('Total time elapsed:', (t_end - t_start) / 60 / 60, 'hours')
full_df.to_csv(save_location, float_format='%.2f', index=False)
return full_df
print('STANDARDIZATION')
stand_df = run_experiments_for_estimator(
lambda model: StandardizationEstimator(outcome_model=model),
model_grid=OUTCOME_MODEL_GRID,
save_location=RESULTS_DIR / 'psid_cps_twins_standard.csv',
meta_est_name='standardization',
model_type='outcome',
gen_models=GEN_MODELS)
print('STRATIFIED STANDARDIZATION')
strat_df = run_experiments_for_estimator(
lambda model: StratifiedStandardizationEstimator(outcome_models=model),
model_grid=OUTCOME_MODEL_GRID,
exclude=[('lalonde_cps', 'KernelRidge')],
save_location=RESULTS_DIR / 'psid_cps_twins_strat_standard.csv',
meta_est_name='stratified_standardization',
model_type='outcome',
gen_models=GEN_MODELS)
print('IPW')
ps_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw.csv',
meta_est_name='ipw',
model_type='prop_score',
gen_models=GEN_MODELS)
print('IPW TRIM EPS 0.01')
ps_trim_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model, trim_eps=0.01),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw_trim_01.csv',
meta_est_name='ipw_trimeps.01',
model_type='prop_score',
gen_models=GEN_MODELS)
print('IPW Stabilized weights')
ps_stab_df = run_experiments_for_estimator(
lambda model: IPWEstimator(prop_score_model=model, stabilized=True),
model_grid=PROP_SCORE_MODEL_GRID,
# exclude=[('lalonde_psid', 'SVM_rbf')],
exclude=['SVM_rbf'],
save_location=RESULTS_DIR / 'psid_cps_twins_ipw_stabilized.csv',
meta_est_name='ipw_stabilized',
model_type='prop_score',
gen_models=GEN_MODELS)
|
[
"sklearn.preprocessing.StandardScaler",
"numpy.logspace",
"experiments.evaluation.calculate_metrics",
"sklearn.tree.DecisionTreeClassifier",
"loading.load_from_folder",
"pathlib.Path",
"sklearn.svm.SVC",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"causal_estimators.standardization_estimator.StandardizationEstimator",
"pandas.DataFrame",
"warnings.simplefilter",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.ElasticNet",
"pandas.concat",
"sklearn.svm.LinearSVR",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.Ridge",
"experiments.evaluation.run_model_cv",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"causal_estimators.ipw_estimator.IPWEstimator",
"sklearn.svm.SVR",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.naive_bayes.GaussianNB",
"sklearn.kernel_ridge.KernelRidge",
"time.time",
"causal_estimators.standardization_estimator.StratifiedStandardizationEstimator",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.neighbors.KNeighborsClassifier"
] |
[((1465, 1536), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'UndefinedMetricWarning'}), "(action='ignore', category=UndefinedMetricWarning)\n", (1486, 1536), False, 'import warnings\n'), ((1754, 1769), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (1758, 1769), False, 'from pathlib import Path\n'), ((1845, 1867), 'numpy.logspace', 'np.logspace', (['(-4)', '(5)', '(10)'], {}), '(-4, 5, 10)\n', (1856, 1867), True, 'import numpy as np\n'), ((6260, 6301), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""lalonde_psid1"""'}), "(dataset='lalonde_psid1')\n", (6276, 6301), False, 'from loading import load_from_folder\n'), ((6324, 6364), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""lalonde_cps1"""'}), "(dataset='lalonde_cps1')\n", (6340, 6364), False, 'from loading import load_from_folder\n'), ((6389, 6422), 'loading.load_from_folder', 'load_from_folder', ([], {'dataset': '"""twins"""'}), "(dataset='twins')\n", (6405, 6422), False, 'from loading import load_from_folder\n'), ((6902, 6913), 'time.time', 'time.time', ([], {}), '()\n', (6911, 6913), False, 'import time\n'), ((1790, 1812), 'numpy.logspace', 'np.logspace', (['(-4)', '(5)', '(10)'], {}), '(-4, 5, 10)\n', (1801, 1812), True, 'import numpy as np\n'), ((10030, 10060), 'pandas.concat', 'pd.concat', (['dataset_dfs'], {'axis': '(0)'}), '(dataset_dfs, axis=0)\n', (10039, 10060), True, 'import pandas as pd\n'), ((10074, 10085), 'time.time', 'time.time', ([], {}), '()\n', (10083, 10085), False, 'import time\n'), ((2189, 2207), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2205, 2207), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2605, 2612), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2610, 2612), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2637, 2644), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2642, 2644), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2674, 2686), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (2684, 2686), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2718, 2731), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {}), '()\n', (2729, 2731), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((2759, 2776), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (2762, 2776), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((2805, 2826), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""sigmoid"""'}), "(kernel='sigmoid')\n", (2808, 2826), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((2853, 2864), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '()\n', (2862, 2864), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3448, 3469), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (3467, 3469), False, 'from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n'), ((3609, 3632), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (3630, 3632), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((4086, 4118), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""'}), "(penalty='l2')\n", (4104, 4118), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4154, 4188), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""'}), "(penalty='none')\n", (4172, 4188), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4235, 4287), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'solver': '"""liblinear"""'}), "(penalty='l2', solver='liblinear')\n", (4253, 4287), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4336, 4388), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'solver': '"""liblinear"""'}), "(penalty='l1', solver='liblinear')\n", (4354, 4388), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4432, 4479), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'solver': '"""saga"""'}), "(penalty='l1', solver='saga')\n", (4450, 4479), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((4501, 4529), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (4527, 4529), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4558, 4617), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {'solver': '"""lsqr"""', 'shrinkage': '"""auto"""'}), "(solver='lsqr', shrinkage='auto')\n", (4584, 4617), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4636, 4667), 'sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis', ([], {}), '()\n', (4665, 4667), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis\n'), ((4774, 4809), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)'}), "(kernel='rbf', probability=True)\n", (4777, 4809), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((4838, 4877), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""sigmoid"""', 'probability': '(True)'}), "(kernel='sigmoid', probability=True)\n", (4841, 4877), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5610, 5632), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (5630, 5632), False, 'from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n'), ((5694, 5706), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (5704, 5706), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5811, 5835), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (5833, 5835), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((8169, 8180), 'time.time', 'time.time', ([], {}), '()\n', (8178, 8180), False, 'import time\n'), ((9831, 9859), 'pandas.concat', 'pd.concat', (['model_dfs'], {'axis': '(0)'}), '(model_dfs, axis=0)\n', (9840, 9859), True, 'import pandas as pd\n'), ((9882, 9893), 'time.time', 'time.time', ([], {}), '()\n', (9891, 9893), False, 'import time\n'), ((10331, 10376), 'causal_estimators.standardization_estimator.StandardizationEstimator', 'StandardizationEstimator', ([], {'outcome_model': 'model'}), '(outcome_model=model)\n', (10355, 10376), False, 'from causal_estimators.standardization_estimator import StandardizationEstimator, StratifiedStandardizationEstimator\n'), ((10663, 10719), 'causal_estimators.standardization_estimator.StratifiedStandardizationEstimator', 'StratifiedStandardizationEstimator', ([], {'outcome_models': 'model'}), '(outcome_models=model)\n', (10697, 10719), False, 'from causal_estimators.standardization_estimator import StandardizationEstimator, StratifiedStandardizationEstimator\n'), ((11043, 11079), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model'}), '(prop_score_model=model)\n', (11055, 11079), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((11418, 11469), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model', 'trim_eps': '(0.01)'}), '(prop_score_model=model, trim_eps=0.01)\n', (11430, 11469), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((11832, 11885), 'causal_estimators.ipw_estimator.IPWEstimator', 'IPWEstimator', ([], {'prop_score_model': 'model', 'stabilized': '(True)'}), '(prop_score_model=model, stabilized=True)\n', (11844, 11885), False, 'from causal_estimators.ipw_estimator import IPWEstimator\n'), ((2267, 2318), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(True)'}), '(degree=2, interaction_only=True)\n', (2285, 2318), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((2339, 2357), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2355, 2357), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((2422, 2450), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)'}), '(degree=2)\n', (2440, 2450), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((2452, 2470), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2468, 2470), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Lasso, Ridge, ElasticNet, RidgeClassifier\n'), ((8461, 8472), 'time.time', 'time.time', ([], {}), '()\n', (8470, 8472), False, 'import time\n'), ((8495, 8657), 'experiments.evaluation.run_model_cv', 'run_model_cv', (['gen_model', 'model'], {'model_name': 'model_name', 'param_grid': 'param_grid', 'n_seeds': 'n_seeds_cv', 'model_type': 'model_type', 'best_model': '(False)', 'ret_time': '(False)'}), '(gen_model, model, model_name=model_name, param_grid=param_grid,\n n_seeds=n_seeds_cv, model_type=model_type, best_model=False, ret_time=False\n )\n', (8507, 8657), False, 'from experiments.evaluation import run_model_cv\n'), ((9441, 9467), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_list'], {}), '(metrics_list)\n', (9453, 9467), True, 'import pandas as pd\n'), ((9491, 9535), 'pandas.concat', 'pd.concat', (['[results, causal_metrics]'], {'axis': '(1)'}), '([results, causal_metrics], axis=1)\n', (9500, 9535), True, 'import pandas as pd\n'), ((9715, 9726), 'time.time', 'time.time', ([], {}), '()\n', (9724, 9726), False, 'import time\n'), ((3123, 3139), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3137, 3139), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3148, 3165), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3151, 3165), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3247, 3263), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3261, 3263), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3272, 3293), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""sigmoid"""'}), "(kernel='sigmoid')\n", (3275, 3293), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((3373, 3389), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3387, 3389), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((3398, 3409), 'sklearn.svm.LinearSVR', 'LinearSVR', ([], {}), '()\n', (3407, 3409), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5087, 5103), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5101, 5103), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((5112, 5147), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)'}), "(kernel='rbf', probability=True)\n", (5115, 5147), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((5229, 5245), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5243, 5245), False, 'from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n'), ((5297, 5336), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""sigmoid"""', 'probability': '(True)'}), "(kernel='sigmoid', probability=True)\n", (5300, 5336), False, 'from sklearn.svm import SVR, LinearSVR, SVC, LinearSVC\n'), ((8813, 8824), 'time.time', 'time.time', ([], {}), '()\n', (8822, 8824), False, 'import time\n'), ((8934, 9038), 'experiments.evaluation.calculate_metrics', 'calculate_metrics', (['gen_model', 'estimator'], {'n_seeds': 'n_seeds_metrics', 'conf_ints': '(False)', 'ate': 'ate', 'ite': 'ite'}), '(gen_model, estimator, n_seeds=n_seeds_metrics, conf_ints=\n False, ate=ate, ite=ite)\n', (8951, 9038), False, 'from experiments.evaluation import calculate_metrics\n'), ((9112, 9123), 'time.time', 'time.time', ([], {}), '()\n', (9121, 9123), False, 'import time\n')]
|
import os
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils as utils
import sys
import argparse
import matplotlib
import pdb
import numpy as np
import time
import random
import re
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from tqdm import trange
from sklearn import metrics
from torch.utils import data
from collections import Counter
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from torch.cuda.amp import autocast as autocast
from torch.cuda.amp import GradScaler as GradScaler
def seed_everything(args):
random.seed(args.seed)
os.environ['PYTHONASSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def ifinclude(str1,str2):
#name.lower() in linelist[0].lower():
str1list = str1.lower().split(' ') ####name
str2list = str2.lower().split(' ') ####linelist
ifin = False
for i in range(0,len(str2list)):
if str2list[i] == str1list[0]:
ifin = True
for j in range(1,len(str1list)):
if str2list[i+j] != str1list[j]:
ifin = False
break
if ifin == True:
break
else:
continue
return ifin
def handlefile(inputfile,outputfile,allnumber,trainnumber):
f = open(inputfile,'r')
allres = {}
alltype = []
for key in allnumber.keys():
alltype.append(key)
insen = 0
allin = {}
notinsen = 0
allnotin = {}
while True:
line = f.readline().strip()
if not line:
break
linelist = line.split("__ans__")
if len(linelist) != 2:
continue
entitylist = linelist[1]
if entitylist == 'end':
continue
if ';' not in entitylist:
continue
allentity = entitylist.split(";")
if len(allentity) != 2:
continue
firstentity = allentity[0]
#print(firstentity)
if '!' not in firstentity:
continue
splitent = firstentity.split('!')
if len(splitent) != 2:
continue
thistype = splitent[1].strip()
#print(thistype)
if thistype not in alltype:
continue
#print(linelist[0] + '\t' + linelist[1])
name = linelist[1].split(";")[0].split("!")[0].strip(' ')
entype = linelist[1].split(";")[0].split("!")[1].strip(' ')
whole = name + " ! " + entype + " ;"
#print(name)
#####some filters
thissen = linelist[0]
####length
# senlist = thissen.split(' ')
# if len(senlist) <= 3:
# continue
# digitnum = 0
# for one in senlist:
# if re.search(r'\d', one):
# digitnum += 1
# if len(senlist) - digitnum < 1:
# continue
#ifin = ifinclude(name,linelist[0])
#if ifin:
if name.lower() in linelist[0].lower():
length = len(name)
startindex = linelist[0].lower().find(name.lower())
endindex = startindex + length
toreplace = linelist[0][startindex:endindex]
#newsen = linelist[0]
newsen = linelist[0].replace(toreplace,name)
if thistype not in allin:
#allin[thistype] = [linelist[0] + '\t' + linelist[1]]
allin[thistype] = {}
if whole not in allin[thistype]:
insen += 1
allin[thistype][whole] = [newsen]
#else:
# allin[thistype][whole].append(linelist[0])
else:
#allin[thistype].append(linelist[0] + '\t' + linelist[1])
if whole not in allin[thistype]:
insen += 1
allin[thistype][whole] = [newsen]
#else:
# allin[thistype][whole].append(linelist[0])
else:
########some filter
##ensure the entity has similar words in sen
# if name.lower() in linelist[0].lower():
# ###thisone will be used
# str1list = name.lower().split(' ') ####name
# nolowlist = name.split(' ')
# str2list = linelist[0].lower().split(' ') ####linelist
# ifin = False
# touselist = linelist[0].split(' ')
# for i in range(0, len(str2list)):
# if str1list[0] in str2list[i]:
# touselist[i] = nolowlist[0]
# for j in range(1,len(str1list)):
# touselist[i+j] = nolowlist[j]
# else:
# continue
# newsen = ' '.join(touselist)
# else:
# ####whether first similar 0.75 5
# str1list = name.lower().split(' ')
# tousestr = str1list[0]
# str2list = linelist[0].lower().split(' ')
# ifhave = 0
# index = -1
# for j in range(0,len(str2list)):
# thistoken = str2list[j]
# samenum = 0
# for k in range(min(len(tousestr),len(thistoken))):
# if tousestr[k] == thistoken[k]:
# samenum += 1
# else:
# break
# if min(len(tousestr),len(thistoken)) == 0:
# continue
# if samenum >= 5 or float(samenum) / float(min(len(tousestr),len(thistoken))) >= 0.75:
# ifhave = 1
# index = j
# break
# if not ifhave:
# continue
# else:
# ###replace
# newlinelist = linelist[0].split()[0:index] + name.split(' ') + linelist[0].split()[index+1:]
# newsen = " ".join(newlinelist)
if thistype not in allnotin:
#allnotin[thistype] = [linelist[0] + '\t' + linelist[1]]
allnotin[thistype] = {}
if whole not in allnotin[thistype]:
notinsen += 1
newsen = linelist[0] + " " + name
allnotin[thistype][whole] = [newsen]
#else:
# allnotin[thistype][whole].append(linelist[0])
else:
#allnotin[thistype].append(linelist[0] + '\t' + linelist[1])
if whole not in allnotin[thistype]:
notinsen += 1
newsen = linelist[0] + " " + name
allnotin[thistype][whole] = [newsen]
#else:
# allnotin[thistype][whole].append(linelist[0])
f.close()
print(insen)
print(notinsen)
# for key in allin:
# print(key+"\t"+str(len(allin[key])))
# for key in allnotin:
# print(key+"\t"+str(len(allnotin[key])))
# for key in allin:
# for one in allin[key]:
# for aa in allin[key][one]:
# print(aa+" "+one)
# for key in allnotin:
# for one in allnotin[key]:
# for aa in allnotin[key][one]:
# print(aa + " " + one)
finalres = {}
fall = open("allgenerate",'w')
for key in allnumber.keys():
finalres[key] = []
for key in allin:
for one in allin[key]:
for aa in allin[key][one]:
finalres[key].append(aa+"\t"+one)
fall.write(aa+"\t"+one+'\n')
for key in allnotin:
for one in allnotin[key]:
for aa in allnotin[key][one]:
finalres[key].append(aa+"\t"+one)
fall.write(aa + "\t" + one + '\n')
fall.close()
#for key in finalres.keys():
# print(len(finalres[key]))
sampleres = []
trainres = []
validres = []
for key in finalres.keys():
thissample = random.sample(finalres[key],allnumber[key])
#print(thissample)
sampleres.extend(thissample)
####divide to train and valid
thistrainnum = trainnumber[key]
indexlist = [i for i in range(allnumber[key])]
#print(indexlist)
trainuse = random.sample(indexlist,thistrainnum)
#print(trainuse)
for j in range(allnumber[key]):
if j in trainuse:
trainres.append(thissample[j])
else:
validres.append(thissample[j])
#print(trainres)
#print(validres)
#print(sampleres)
fo = open(outputfile, 'w')
for one in sampleres:
fo.write(one+"\n")
fo.close()
fot = open('train_mem.txt', 'w')
for one in trainres:
fot.write(one+"\n")
fot.close()
fov = open('valid_mem.txt', 'w')
for one in validres:
fov.write(one + "\n")
fov.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="latentRE")
parser.add_argument("--model", dest="model", type=str,
default="T5", help="{T5}")
parser.add_argument("--seed", dest="seed", type=int,
default=160, help="seed for network")
args = parser.parse_args()
seed_everything(args)
if args.model == "T5":
#seed 100
#train: person:10 location:12 org:6 mix:7
#valid: person:16 location:12 org:11 mix:8
print("right!")
# allnumber = {'org': 17, 'location': 24, 'person': 26, 'mix': 15}
# trainnumber = {'org': 6, 'location': 12, 'person': 10, 'mix': 7}
# allnumber = {'org':15,'location':14,'person':11,'mix':9}
# trainnumber = {'org':7,'location':8,'person':5,'mix':4}
allnumber = {'org': 16, 'location': 21, 'person': 20, 'mix': 16}
trainnumber = {'org': 7, 'location': 10, 'person': 11, 'mix': 6}
handlefile("pseudosamples", "allselect", allnumber, trainnumber)
else:
raise Exception("No such model! Please make sure that `model` takes the value in {T5}")
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"torch.manual_seed",
"torch.cuda.manual_seed",
"random.seed"
] |
[((674, 696), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (685, 696), False, 'import random\n'), ((749, 774), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (763, 774), True, 'import numpy as np\n'), ((779, 807), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (796, 807), False, 'import torch\n'), ((812, 845), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (834, 845), False, 'import torch\n'), ((9238, 9285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""latentRE"""'}), "(description='latentRE')\n", (9261, 9285), False, 'import argparse\n'), ((8280, 8324), 'random.sample', 'random.sample', (['finalres[key]', 'allnumber[key]'], {}), '(finalres[key], allnumber[key])\n', (8293, 8324), False, 'import random\n'), ((8566, 8604), 'random.sample', 'random.sample', (['indexlist', 'thistrainnum'], {}), '(indexlist, thistrainnum)\n', (8579, 8604), False, 'import random\n')]
|
import os
import emulation_lib.ssh_lib as ssh
import logging
from datetime import datetime
from datetime import timedelta
from multiprocessing.dummy import Pool as ThreadPool
import time
from . import constants
CONFIG = {}
EXPECTED_RESULTFILES = {}
CONFIG_FILES = {}
REMOTE = 0
LOCAL = 1
setup_scripts = []
runtime_scripts = []
cmd = ""
logger = logging.getLogger("emulation_lib")
logger.setLevel(logging.INFO)
def inventorize_scripts():
global setup_scripts
global runtime_scripts
# setup-scripts
for filename in [f for f in os.listdir(CONFIG["COMMAND_DIR"]) if f.endswith(constants.SETUP_SCRIPT_POSTFIX)]:
name = filename.replace(constants.SETUP_SCRIPT_POSTFIX, "")
if name not in setup_scripts:
setup_scripts.append(name)
# runtime-scripts
for filename in [f for f in os.listdir(CONFIG["COMMAND_DIR"]) if f.endswith(constants.RUNTIME_SCRIPT_POSTFIX)]:
name = filename.replace(constants.RUNTIME_SCRIPT_POSTFIX, "")
if name not in runtime_scripts:
runtime_scripts.append(name)
return
def perform_sanity_checks():
for ip in setup_scripts:
if ip not in runtime_scripts:
raise ValueError(ip + " is missing a corresponding runtime-script, aborting ...")
for ip in runtime_scripts:
if ip not in setup_scripts:
raise ValueError(ip + " is missing a corresponding setup-script, aborting ...")
def perform_setup(ip):
s = ssh.Connection(ip, CONFIG["SSH_USER"], password=CONFIG["SSH_PASSWORD"])
# create folder structure
foldercmd = "mkdir -p " + CONFIG["REMOTE_EMULATION_DIR"] + " " + CONFIG["REMOTE_CONFIG_DIR"] + " " + CONFIG["REMOTE_RESULT_DIR"] + " " + CONFIG["REMOTE_DATA_DIR"]
s.execute(foldercmd)
target_setup_file = os.path.join(CONFIG["REMOTE_CONFIG_DIR"], constants.SETUP_SCRIPT_POSTFIX)
target_runtime_file = os.path.join(CONFIG["REMOTE_CONFIG_DIR"], constants.RUNTIME_SCRIPT_POSTFIX)
# transmit setup- and runtime-scripts
s.put(os.path.join(CONFIG["COMMAND_DIR"], ip + constants.SETUP_SCRIPT_POSTFIX), target_setup_file)
s.put(os.path.join(CONFIG["COMMAND_DIR"] + "/" + ip + constants.RUNTIME_SCRIPT_POSTFIX), target_runtime_file)
# transmit config-files
for config_file in CONFIG_FILES[ip]:
s.put(config_file[LOCAL], config_file[REMOTE]) # transmit config-file
s.execute("chmod +x " + target_setup_file)
result = s.execute(target_setup_file + " > /dev/null 2>&1 ; date -u; echo 'finished setup'") # wait for completion
logger.info(ip + ": " + str(result))
s.close()
return
def execute_runtime_script(ip):
s = ssh.Connection(ip, CONFIG["SSH_USER"], password=CONFIG["SSH_PASSWORD"])
result = s.execute("screen -d -m " + cmd)
#logger.info(result)
s.close()
return
def collect_traces(ip):
s = ssh.Connection(ip, CONFIG["SSH_USER"], password=CONFIG["SSH_PASSWORD"])
for fileTuple in EXPECTED_RESULTFILES[ip]:
parentdir = os.path.dirname(fileTuple[LOCAL])
if not os.path.isdir(parentdir):
os.makedirs(parentdir) # ensure local folder structure exists
if fileTuple[LOCAL].endswith(".zip"): # zip first
s.execute("rm " + fileTuple[REMOTE] + ".zip") # remove eventually already existing file
s.execute("cd " + os.path.dirname(fileTuple[REMOTE]) + " && zip -j " + os.path.basename(fileTuple[REMOTE]) +
".zip " + os.path.basename(fileTuple[REMOTE]))
s.get(fileTuple[REMOTE] + ".zip", fileTuple[LOCAL])
else:
s.get(fileTuple[REMOTE], fileTuple[LOCAL])
s.close()
#
# main entry-point of the program
#
def start_emulation_run(duration, expectedResultfiles, configFiles, config):
global cmd
global CONFIG
global EXPECTED_RESULTFILES
global CONFIG_FILES
CONFIG = config
EXPECTED_RESULTFILES = expectedResultfiles
CONFIG_FILES = configFiles
# inventorize scripts
inventorize_scripts()
# perform sanity-checks (e.g. there must be a runtime-script for every setup-script and vice versa)
perform_sanity_checks()
# deploy scripts + run all setup-scripts and await their termination
logger.info("Performing the setup (script-distribution + run all setup-scripts) ...")
pool = ThreadPool()
results = pool.map(perform_setup, setup_scripts)
pool.close()
pool.join()
# logger.info(results)
# run all runtime-scripts (async ssh-ops towards single starting-time for all nodes)
logger.info("Starting all runtime-scripts (" + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ")")
start = datetime.utcnow() + timedelta(seconds=CONFIG["MIN_START_TIME_OFFSET"]) + timedelta(seconds=1)
start_time = start.strftime('%Y-%m-%d %H:%M:%S')
with open(os.path.join(CONFIG["RESULT_DIR"] + 'start_times.txt'), 'a') as time_index: # save common start time for every run
time_index.write(str(CONFIG["RUN"]) + '\t' + start_time + '\n')
logger.info("Coordinated start at: " + start_time)
# build runtime-script-command
cmd = "cmdScheduler " + os.path.join(CONFIG["REMOTE_CONFIG_DIR"],constants.RUNTIME_SCRIPT_POSTFIX) + " " + start_time
# call start-scripts
pool = ThreadPool()
pool.map(execute_runtime_script, setup_scripts)
pool.close()
pool.join()
logger.info("Waiting for emulation to end")
emulationEnd = start + timedelta(seconds=duration)
time.sleep((emulationEnd - datetime.utcnow()).seconds + 1) # '+1' ... account for rounding errors
# collect result-files
logger.info("Waiting five seconds for logfiles to be written (" + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ")")
time.sleep(5) # wait for (eventual) logfiles to be written
logger.info("Collecting results (" + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ")")
pool = ThreadPool()
pool.map(collect_traces, expectedResultfiles)
pool.close()
pool.join()
|
[
"os.makedirs",
"os.path.basename",
"multiprocessing.dummy.Pool",
"os.path.isdir",
"os.path.dirname",
"time.sleep",
"datetime.datetime.utcnow",
"emulation_lib.ssh_lib.Connection",
"datetime.timedelta",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((349, 383), 'logging.getLogger', 'logging.getLogger', (['"""emulation_lib"""'], {}), "('emulation_lib')\n", (366, 383), False, 'import logging\n'), ((1461, 1532), 'emulation_lib.ssh_lib.Connection', 'ssh.Connection', (['ip', "CONFIG['SSH_USER']"], {'password': "CONFIG['SSH_PASSWORD']"}), "(ip, CONFIG['SSH_USER'], password=CONFIG['SSH_PASSWORD'])\n", (1475, 1532), True, 'import emulation_lib.ssh_lib as ssh\n'), ((1781, 1854), 'os.path.join', 'os.path.join', (["CONFIG['REMOTE_CONFIG_DIR']", 'constants.SETUP_SCRIPT_POSTFIX'], {}), "(CONFIG['REMOTE_CONFIG_DIR'], constants.SETUP_SCRIPT_POSTFIX)\n", (1793, 1854), False, 'import os\n'), ((1881, 1956), 'os.path.join', 'os.path.join', (["CONFIG['REMOTE_CONFIG_DIR']", 'constants.RUNTIME_SCRIPT_POSTFIX'], {}), "(CONFIG['REMOTE_CONFIG_DIR'], constants.RUNTIME_SCRIPT_POSTFIX)\n", (1893, 1956), False, 'import os\n'), ((2642, 2713), 'emulation_lib.ssh_lib.Connection', 'ssh.Connection', (['ip', "CONFIG['SSH_USER']"], {'password': "CONFIG['SSH_PASSWORD']"}), "(ip, CONFIG['SSH_USER'], password=CONFIG['SSH_PASSWORD'])\n", (2656, 2713), True, 'import emulation_lib.ssh_lib as ssh\n'), ((2844, 2915), 'emulation_lib.ssh_lib.Connection', 'ssh.Connection', (['ip', "CONFIG['SSH_USER']"], {'password': "CONFIG['SSH_PASSWORD']"}), "(ip, CONFIG['SSH_USER'], password=CONFIG['SSH_PASSWORD'])\n", (2858, 2915), True, 'import emulation_lib.ssh_lib as ssh\n'), ((4294, 4306), 'multiprocessing.dummy.Pool', 'ThreadPool', ([], {}), '()\n', (4304, 4306), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((5227, 5239), 'multiprocessing.dummy.Pool', 'ThreadPool', ([], {}), '()\n', (5237, 5239), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((5688, 5701), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (5698, 5701), False, 'import time\n'), ((5855, 5867), 'multiprocessing.dummy.Pool', 'ThreadPool', ([], {}), '()\n', (5865, 5867), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((2010, 2082), 'os.path.join', 'os.path.join', (["CONFIG['COMMAND_DIR']", '(ip + constants.SETUP_SCRIPT_POSTFIX)'], {}), "(CONFIG['COMMAND_DIR'], ip + constants.SETUP_SCRIPT_POSTFIX)\n", (2022, 2082), False, 'import os\n'), ((2113, 2199), 'os.path.join', 'os.path.join', (["(CONFIG['COMMAND_DIR'] + '/' + ip + constants.RUNTIME_SCRIPT_POSTFIX)"], {}), "(CONFIG['COMMAND_DIR'] + '/' + ip + constants.\n RUNTIME_SCRIPT_POSTFIX)\n", (2125, 2199), False, 'import os\n'), ((2983, 3016), 'os.path.dirname', 'os.path.dirname', (['fileTuple[LOCAL]'], {}), '(fileTuple[LOCAL])\n', (2998, 3016), False, 'import os\n'), ((4701, 4721), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (4710, 4721), False, 'from datetime import timedelta\n'), ((5401, 5428), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (5410, 5428), False, 'from datetime import timedelta\n'), ((548, 581), 'os.listdir', 'os.listdir', (["CONFIG['COMMAND_DIR']"], {}), "(CONFIG['COMMAND_DIR'])\n", (558, 581), False, 'import os\n'), ((830, 863), 'os.listdir', 'os.listdir', (["CONFIG['COMMAND_DIR']"], {}), "(CONFIG['COMMAND_DIR'])\n", (840, 863), False, 'import os\n'), ((3032, 3056), 'os.path.isdir', 'os.path.isdir', (['parentdir'], {}), '(parentdir)\n', (3045, 3056), False, 'import os\n'), ((3070, 3092), 'os.makedirs', 'os.makedirs', (['parentdir'], {}), '(parentdir)\n', (3081, 3092), False, 'import os\n'), ((4628, 4645), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4643, 4645), False, 'from datetime import datetime\n'), ((4648, 4698), 'datetime.timedelta', 'timedelta', ([], {'seconds': "CONFIG['MIN_START_TIME_OFFSET']"}), "(seconds=CONFIG['MIN_START_TIME_OFFSET'])\n", (4657, 4698), False, 'from datetime import timedelta\n'), ((4790, 4844), 'os.path.join', 'os.path.join', (["(CONFIG['RESULT_DIR'] + 'start_times.txt')"], {}), "(CONFIG['RESULT_DIR'] + 'start_times.txt')\n", (4802, 4844), False, 'import os\n'), ((5096, 5171), 'os.path.join', 'os.path.join', (["CONFIG['REMOTE_CONFIG_DIR']", 'constants.RUNTIME_SCRIPT_POSTFIX'], {}), "(CONFIG['REMOTE_CONFIG_DIR'], constants.RUNTIME_SCRIPT_POSTFIX)\n", (5108, 5171), False, 'import os\n'), ((3444, 3479), 'os.path.basename', 'os.path.basename', (['fileTuple[REMOTE]'], {}), '(fileTuple[REMOTE])\n', (3460, 3479), False, 'import os\n'), ((5460, 5477), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5475, 5477), False, 'from datetime import datetime\n'), ((4561, 4578), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4576, 4578), False, 'from datetime import datetime\n'), ((5629, 5646), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5644, 5646), False, 'from datetime import datetime\n'), ((5789, 5806), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5804, 5806), False, 'from datetime import datetime\n'), ((3374, 3409), 'os.path.basename', 'os.path.basename', (['fileTuple[REMOTE]'], {}), '(fileTuple[REMOTE])\n', (3390, 3409), False, 'import os\n'), ((3321, 3355), 'os.path.dirname', 'os.path.dirname', (['fileTuple[REMOTE]'], {}), '(fileTuple[REMOTE])\n', (3336, 3355), False, 'import os\n')]
|
from __future__ import absolute_import, division, print_function
from builtins import str
from panoptes_client.panoptes import PanoptesObject, LinkResolver
from panoptes_client.set_member_subject import SetMemberSubject
from panoptes_client.subject import Subject
from panoptes_client.utils import batchable
class SubjectSet(PanoptesObject):
_api_slug = 'subject_sets'
_link_slug = 'subject_sets'
_edit_attributes = (
'display_name',
{
'links': (
'project',
),
'metadata': (
'category',
)
},
)
@property
def subjects(self):
"""
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
"""
for sms in SetMemberSubject.where(subject_set_id=self.id):
yield sms.links.subject
@batchable
def add(self, subjects):
"""
Links the given subjects to this set.
- **subjects** can be a list of :py:class:`.Subject` instances, a list
of subject IDs, a single :py:class:`.Subject` instance, or a single
subject ID.
Examples::
subject_set.add(1234)
subject_set.add([1,2,3,4])
subject_set.add(Subject(1234))
subject_set.add([Subject(12), Subject(34)])
"""
_subjects = self._build_subject_list(subjects)
self.http_post(
'{}/links/subjects'.format(self.id),
json={'subjects': _subjects}
)
@batchable
def remove(self, subjects):
"""
Unlinks the given subjects from this set.
- **subjects** can be a list of :py:class:`.Subject` instances, a list
of subject IDs, a single :py:class:`.Subject` instance, or a single
subject ID.
Examples::
subject_set.remove(1234)
subject_set.remove([1,2,3,4])
subject_set.remove(Subject(1234))
subject_set.remove([Subject(12), Subject(34)])
"""
_subjects = self._build_subject_list(subjects)
_subjects_ids = ",".join(_subjects)
self.http_delete(
'{}/links/subjects/{}'.format(self.id, _subjects_ids)
)
def __contains__(self, subject):
"""
Tests if the subject is linked to the subject_set.
- **subject** a single :py:class:`.Subject` instance, or a single
subject ID.
Returns a boolean indicating if the subject is linked to the
subject_set.
Examples::
1234 in subject_set
Subject(1234) in subject_set
"""
if isinstance(subject, Subject):
_subject_id = str(subject.id)
else:
_subject_id = str(subject)
linked_subject_count = SetMemberSubject.where(
subject_set_id=self.id,
subject_id=_subject_id
).object_count
return linked_subject_count == 1
def _build_subject_list(self, subjects):
_subjects = []
for subject in subjects:
if not (
isinstance(subject, Subject)
or isinstance(subject, (int, str,))
):
raise TypeError
if isinstance(subject, Subject):
_subject_id = str(subject.id)
else:
_subject_id = str(subject)
_subjects.append(_subject_id)
return _subjects
LinkResolver.register(SubjectSet)
LinkResolver.register(SubjectSet, 'subject_set')
|
[
"builtins.str",
"panoptes_client.panoptes.LinkResolver.register",
"panoptes_client.set_member_subject.SetMemberSubject.where"
] |
[((3590, 3623), 'panoptes_client.panoptes.LinkResolver.register', 'LinkResolver.register', (['SubjectSet'], {}), '(SubjectSet)\n', (3611, 3623), False, 'from panoptes_client.panoptes import PanoptesObject, LinkResolver\n'), ((3624, 3672), 'panoptes_client.panoptes.LinkResolver.register', 'LinkResolver.register', (['SubjectSet', '"""subject_set"""'], {}), "(SubjectSet, 'subject_set')\n", (3645, 3672), False, 'from panoptes_client.panoptes import PanoptesObject, LinkResolver\n'), ((906, 952), 'panoptes_client.set_member_subject.SetMemberSubject.where', 'SetMemberSubject.where', ([], {'subject_set_id': 'self.id'}), '(subject_set_id=self.id)\n', (928, 952), False, 'from panoptes_client.set_member_subject import SetMemberSubject\n'), ((2838, 2853), 'builtins.str', 'str', (['subject.id'], {}), '(subject.id)\n', (2841, 2853), False, 'from builtins import str\n'), ((2894, 2906), 'builtins.str', 'str', (['subject'], {}), '(subject)\n', (2897, 2906), False, 'from builtins import str\n'), ((2939, 3009), 'panoptes_client.set_member_subject.SetMemberSubject.where', 'SetMemberSubject.where', ([], {'subject_set_id': 'self.id', 'subject_id': '_subject_id'}), '(subject_set_id=self.id, subject_id=_subject_id)\n', (2961, 3009), False, 'from panoptes_client.set_member_subject import SetMemberSubject\n'), ((3442, 3457), 'builtins.str', 'str', (['subject.id'], {}), '(subject.id)\n', (3445, 3457), False, 'from builtins import str\n'), ((3506, 3518), 'builtins.str', 'str', (['subject'], {}), '(subject)\n', (3509, 3518), False, 'from builtins import str\n')]
|
from discord import Colour, Embed
from discord.ext.commands import (BadArgument, Cog, CommandError,
CommandNotFound, Context,
MissingRequiredArgument)
from bot.bot import SirRobin
from bot.log import get_logger
log = get_logger(__name__)
class ErrorHandler(Cog):
"""Handles errors emitted from commands."""
def __init__(self, bot: SirRobin):
self.bot = bot
@staticmethod
def _get_error_embed(title: str, body: str) -> Embed:
"""Return a embed with our error colour assigned."""
return Embed(
title=title,
colour=Colour.brand_red(),
description=body
)
@Cog.listener()
async def on_command_error(self, ctx: Context, error: CommandError) -> None:
"""
Generic command error handling from other cogs.
Using the error type, handle the error appropriately.
if there is no handling for the error type raised,
a message will be sent to the user & it will be logged.
In the future, I would expect this to be used as a place
to push errors to a sentry instance.
"""
log.trace(f"Handling a raised error {error} from {ctx.command}")
# We could handle the subclasses of UserInputError errors together, using the error
# name as the embed title. Before doing this we would have to verify that all messages
# attached to subclasses of this error are human-readable, as they are user facing.
if isinstance(error, BadArgument):
embed = self._get_error_embed("Bad argument", str(error))
await ctx.send(embed=embed)
return
elif isinstance(error, CommandNotFound):
embed = self._get_error_embed("Command not found", str(error))
await ctx.send(embed=embed)
return
elif isinstance(error, MissingRequiredArgument):
embed = self._get_error_embed("Missing required argument", str(error))
await ctx.send(embed=embed)
return
# If we haven't handled it by this point, it is considered an unexpected/handled error.
await ctx.send(
f"Sorry, an unexpected error occurred. Please let us know!\n\n"
f"```{error.__class__.__name__}: {error}```"
)
log.error(f"Error executing command invoked by {ctx.message.author}: {ctx.message.content}", exc_info=error)
async def setup(bot: SirRobin) -> None:
"""Load the ErrorHandler cog."""
await bot.add_cog(ErrorHandler(bot))
|
[
"bot.log.get_logger",
"discord.Colour.brand_red",
"discord.ext.commands.Cog.listener"
] |
[((287, 307), 'bot.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (297, 307), False, 'from bot.log import get_logger\n'), ((715, 729), 'discord.ext.commands.Cog.listener', 'Cog.listener', ([], {}), '()\n', (727, 729), False, 'from discord.ext.commands import BadArgument, Cog, CommandError, CommandNotFound, Context, MissingRequiredArgument\n'), ((650, 668), 'discord.Colour.brand_red', 'Colour.brand_red', ([], {}), '()\n', (666, 668), False, 'from discord import Colour, Embed\n')]
|
import os
import shlex
import subprocess
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
Compresses / uploads an ATLAS database tarball
"""
help = "Compresses / uploads an ATLAS database tarball"
@staticmethod
def do_shell_command(command_string):
result = subprocess.check_output(shlex.split(command_string))
return result.decode("utf-8")
def handle(self, *args, **options):
database_path = settings.SV_ATLAS_DB_PATH
if database_path is None:
msg = "The SV_ATLAS_DB_PATH setting is missing and is required for this management command to work."
raise ImproperlyConfigured(msg)
self.stdout.write(
"--[Creating / uploading database tarball]--"
)
database_file = os.path.basename(database_path)
result = self.do_shell_command(f"md5sum {database_path}")
md5sha = result.split(" ")[0]
self.stdout.write(f"{database_path} md5 sha: {md5sha}")
database_dir = os.path.dirname(database_path)
os.chdir(database_dir)
compressed_db_filename = f"db-{md5sha}.tgz"
self.stdout.write(f"Compressing {database_path} as {compressed_db_filename} ")
tar_cmd = f"tar -cvzf {compressed_db_filename} {database_file}"
self.do_shell_command(tar_cmd)
bucket = "atlas-db-tarballs"
site = "sv-pdl"
self.stdout.write(f"Uploading {compressed_db_filename} to {bucket}")
gsutil_cmd = f"gsutil -m cp -a public-read {compressed_db_filename} gs://{bucket}/{site}/{compressed_db_filename}"
self.do_shell_command(gsutil_cmd)
url = f"https://storage.googleapis.com/{bucket}/{site}/{compressed_db_filename}"
self.stdout.write(f"Uploaded to {url}")
self.stdout.write(f"Removing {compressed_db_filename}")
rm_cmd = f"rm {compressed_db_filename}"
self.do_shell_command(rm_cmd)
self.stdout.write(f"Writing {url} to .atlas-db-url")
atlas_db_url_path = os.path.join(
settings.PROJECT_ROOT,
".atlas-db-url"
)
with open(atlas_db_url_path, "w") as f:
f.write(url)
self.stdout.write("--[Done!]--")
# NOTE: run export ATLAS_DB_URL=$(cat .atlas-db-url)
# to populate $ATLAS_DB_URL
|
[
"django.core.exceptions.ImproperlyConfigured",
"os.path.basename",
"os.path.dirname",
"shlex.split",
"os.path.join",
"os.chdir"
] |
[((914, 945), 'os.path.basename', 'os.path.basename', (['database_path'], {}), '(database_path)\n', (930, 945), False, 'import os\n'), ((1138, 1168), 'os.path.dirname', 'os.path.dirname', (['database_path'], {}), '(database_path)\n', (1153, 1168), False, 'import os\n'), ((1177, 1199), 'os.chdir', 'os.chdir', (['database_dir'], {}), '(database_dir)\n', (1185, 1199), False, 'import os\n'), ((2134, 2186), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '""".atlas-db-url"""'], {}), "(settings.PROJECT_ROOT, '.atlas-db-url')\n", (2146, 2186), False, 'import os\n'), ((443, 470), 'shlex.split', 'shlex.split', (['command_string'], {}), '(command_string)\n', (454, 470), False, 'import shlex\n'), ((767, 792), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['msg'], {}), '(msg)\n', (787, 792), False, 'from django.core.exceptions import ImproperlyConfigured\n')]
|
from typing import Dict
import pytest
from limited import Zone
from limited.exceptions import LimitExceededException
class MockZone(Zone):
buckets: Dict[str, int]
def __init__(self, size: int = 10, rate: float = 1.0):
self.rate = rate
self.size = size
self.buckets = dict()
def count(self, key: str) -> int:
return self.buckets[key]
def remove(self, key: str, count: int) -> bool:
if self.buckets[key] >= count:
self.buckets[key] -= count
return True
else:
return False
def test_zone_ttl():
zone = MockZone(size=10, rate=2.0)
assert zone.ttl == 5.0
def test_zone_check():
zone = MockZone()
zone.buckets['a'] = 5
zone.buckets['b'] = 0
assert zone.check('a')
assert not zone.check('b')
def test_zone_increment():
zone = MockZone()
zone.buckets['a'] = 5
zone.increment('a')
assert zone.buckets['a'] == 4
def test_zone_limit():
zone = MockZone()
zone.buckets['a'] = 5
zone.buckets['b'] = 0
assert not zone.limit('a')
assert zone.limit('b')
assert zone.buckets['a'] == 4
assert zone.buckets['b'] == 0
def test_zone_hard_limit():
zone = MockZone()
zone.buckets['a'] = 5
zone.buckets['b'] = 0
zone.hard_limit('a')
with pytest.raises(LimitExceededException):
zone.hard_limit('b')
|
[
"pytest.raises"
] |
[((1321, 1358), 'pytest.raises', 'pytest.raises', (['LimitExceededException'], {}), '(LimitExceededException)\n', (1334, 1358), False, 'import pytest\n')]
|
from msilib import Table
from tkinter import *
import tkinter as tk
from tkinter import filedialog
from pandastable import Table,TableModel
import pandas as pd
from Hesapla import MC_Karar_Agaci
#gerekli değişkenler
test_sinir_indeks = 0
#pencere oluşturma
root = Tk()
root.title("Karar Ağacı Projesi")
root.geometry("1480x800")
frame1 = Frame(root)
frame1.pack()
#label ekleme
title = Label(frame1,text="\n Cevher ile Muhammed'in sınıflandırma için otomatik karar ağacı programına hoşgeldiniz..\n\n",font=16,fg="purple")
title.grid()
def Load():
root2 = Tk()
root2.withdraw()
global file_path
file_path = filedialog.askopenfilename(filetypes=(("All files", "*.*"),("Csv Files", "*.csv"),("Data Files", "*.data")))
global dataset
dataset = pd.read_csv(file_path)
frame2 = Frame(root)
frame2.pack(side=BOTTOM) # asagıda cerceve olusmasını sağladık.
pt = Table(frame2, dataframe=dataset, showstatusbar=True, showtoolbar=True,width=1000,height=500)
pt.show()
def getResult():
root3 = Tk()
root3.title("Model - Başarı")
root3.geometry("1480x800")
test_sinir_indeks = int(trainingLimitEntry.get())
trainData = dataset.iloc[0:test_sinir_indeks] # train
testData = dataset.iloc[test_sinir_indeks:dataset.shape[0]+1] # test
# model oluştur.
MC = MC_Karar_Agaci()
hedefNitelikAdi = targetColumnEntry.get()
R,model = MC.modelOlustur(trainData, hedefNitelikAdi)
# Tahmin yap
print("\n")
sonuc = MC.tahminEt(root=R, test=testData, i=test_sinir_indeks) # i test verisinin kaçıncı indisten başladığı.
print("Tahmin sonucu :", sonuc)
frame3 = Frame(root3)
frame3.pack(side=LEFT)
listbox = Listbox(frame3,width=50, height=50,font=16)
for i in model:
listbox.insert(END,i)
listbox.pack(fill=BOTH, expand=0)
frame4 = Frame(root3)
frame4.pack(side=RIGHT)
score=0
index = 0
for i in testData[hedefNitelikAdi]:
if i == sonuc[index]:
score = score + 1
if len(sonuc)-1 == index:
break
index = index + 1
accuracy_score = score / len(testData[hedefNitelikAdi])
print(accuracy_score)
list = []
list.append("Sonuçlar")
list.append("Accuracy Score : " + str(accuracy_score))
list.append("")
list.append("")
for i in range(len(sonuc)):
list.append("P:" + str(sonuc[i])+" T:" + str(testData.iloc[i][hedefNitelikAdi]))
listbox2 = Listbox(frame4, width=50, height=50,font=16)
for i in list:
listbox2.insert(END, i)
listbox2.pack(fill=BOTH, expand=0)
root3.mainloop()
LoadDatasetBtn = Button(frame1, text=" Dataset seç ", fg="blue", command=Load,font=16)
LoadDatasetBtn.grid(row=2)
spacerLabel = Label(frame1,text=" ")
spacerLabel.grid(row=3, column=0, sticky=W, pady=1)
targetColumnLabel = Label(frame1,text="Hedef Kolonu Giriniz: \n",font=14)
targetColumnLabel.grid(row=4, column=0, sticky=W, pady=1)
targetColumnEntry = Entry(frame1,font=14)
targetColumnEntry.grid(row=4, column=0,sticky=N)
maxDeptLabel = Label(frame1,text="*iptal* Maksimum Derinlik: \n",font=14)
maxDeptLabel.grid(row=5, column=0, sticky=W)
maxDeptEntry = Entry(frame1,font=14)
maxDeptEntry.grid(row=5,column=0,sticky=N)
trainingLimitLabel = Label(frame1,text="Eğitim veriseti sınır indeksi:\n",font=14)
trainingLimitLabel.grid(row=6, column=0, sticky=W)
trainingLimitEntry = Entry(frame1,font=14)
trainingLimitEntry.grid(row=6, column = 0,sticky=N)
getResultBtn = Button(frame1,text="Sonuçları Göster",fg="green" ,command=getResult,font=16)
getResultBtn.grid(row=7)
root.mainloop()
|
[
"pandas.read_csv",
"Hesapla.MC_Karar_Agaci",
"pandastable.Table",
"tkinter.filedialog.askopenfilename"
] |
[((631, 745), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': "(('All files', '*.*'), ('Csv Files', '*.csv'), ('Data Files', '*.data'))"}), "(filetypes=(('All files', '*.*'), ('Csv Files',\n '*.csv'), ('Data Files', '*.data')))\n", (657, 745), False, 'from tkinter import filedialog\n'), ((774, 796), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (785, 796), True, 'import pandas as pd\n'), ((901, 999), 'pandastable.Table', 'Table', (['frame2'], {'dataframe': 'dataset', 'showstatusbar': '(True)', 'showtoolbar': '(True)', 'width': '(1000)', 'height': '(500)'}), '(frame2, dataframe=dataset, showstatusbar=True, showtoolbar=True,\n width=1000, height=500)\n', (906, 999), False, 'from pandastable import Table, TableModel\n'), ((1328, 1344), 'Hesapla.MC_Karar_Agaci', 'MC_Karar_Agaci', ([], {}), '()\n', (1342, 1344), False, 'from Hesapla import MC_Karar_Agaci\n')]
|
import numpy as np
import torch
class UnityEnv():
"""Unity Reacher Environment Wrapper
https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Learning-Environment-Examples.md
"""
def __init__(self, env_file='data/Reacher.exe', no_graphics=True, mlagents=False):
if mlagents:
from mlagents.envs.environment import UnityEnvironment
else:
from unityagents import UnityEnvironment
self.env = UnityEnvironment(file_name=env_file, no_graphics=no_graphics)
self.brain_name = self.env.brain_names[0]
brain = self.env.brains[self.brain_name]
self.action_size = brain.vector_action_space_size
if type(self.action_size) != int:
self.action_size = self.action_size[0]
env_info = self.env.reset(train_mode=True)[self.brain_name]
self.state_size = env_info.vector_observations.shape[1]
self.num_agents = len(env_info.agents)
def reset(self, train=True):
env_info = self.env.reset(train_mode=train)[self.brain_name]
return env_info.vector_observations
def close(self):
self.env.close()
def step(self, actions):
actions = np.clip(actions, -1, 1)
env_info = self.env.step(actions)[self.brain_name]
next_states = env_info.vector_observations
rewards = env_info.rewards
dones = env_info.local_done
return next_states, np.array(rewards), np.array(dones)
@property
def action_shape(self):
return (self.num_agents, self.action_size)
|
[
"numpy.array",
"unityagents.UnityEnvironment",
"numpy.clip"
] |
[((468, 529), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': 'env_file', 'no_graphics': 'no_graphics'}), '(file_name=env_file, no_graphics=no_graphics)\n', (484, 529), False, 'from unityagents import UnityEnvironment\n'), ((1213, 1236), 'numpy.clip', 'np.clip', (['actions', '(-1)', '(1)'], {}), '(actions, -1, 1)\n', (1220, 1236), True, 'import numpy as np\n'), ((1447, 1464), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (1455, 1464), True, 'import numpy as np\n'), ((1466, 1481), 'numpy.array', 'np.array', (['dones'], {}), '(dones)\n', (1474, 1481), True, 'import numpy as np\n')]
|
"""
Test fastview/permissions.py
"""
import pytest
from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser
from .app.models import Entry
def test_public__public_can_access(test_data, request_public):
perm = Public()
assert perm.check(request_public) is True
assert perm.filter(request_public, test_data).count() == test_data.count()
def test_login__public_cannot_access(test_data, request_public):
perm = Login()
assert perm.check(request_public) is False
assert perm.filter(request_public, test_data).count() == 0
def test_login__authed_can_access(test_data, request_owner):
perm = Login()
assert perm.check(request_owner) is True
assert perm.filter(request_owner, test_data).count() == test_data.count()
def test_staff__public_cannot_access(test_data, request_public):
perm = Staff()
assert perm.check(request_public) is False
assert perm.filter(request_public, test_data).count() == 0
def test_staff__authed_cannot_access(test_data, request_owner):
perm = Staff()
assert perm.check(request_owner) is False
assert perm.filter(request_owner, test_data).count() == 0
def test_staff__staff_can_access(test_data, request_staff):
perm = Staff()
assert perm.check(request_staff) is True
assert perm.filter(request_staff, test_data).count() == test_data.count()
def test_superuser__public_cannot_access(test_data, request_public):
perm = Superuser()
assert perm.check(request_public) is False
assert perm.filter(request_public, test_data).count() == 0
def test_superuser__authed_cannot_access(test_data, request_owner):
perm = Superuser()
assert perm.check(request_owner) is False
assert perm.filter(request_owner, test_data).count() == 0
def test_superuser__staff_cannot_access(test_data, request_staff):
perm = Superuser()
assert perm.check(request_staff) is False
assert perm.filter(request_staff, test_data).count() == 0
def test_superuser__superuser_can_access(test_data, request_superuser):
perm = Superuser()
assert perm.check(request_superuser) is True
assert perm.filter(request_superuser, test_data).count() == test_data.count()
def test_django__public_cannot_access(test_data, request_public):
perm = Django(action="add")
assert perm.check(request_public, model=Entry) is False
assert perm.filter(request_public, test_data).count() == 0
def test_django__authed_cannot_access(test_data, request_owner):
perm = Django(action="add")
assert perm.check(request_owner, model=Entry) is False
assert perm.filter(request_owner, test_data).count() == 0
def test_django__staff_cannot_access(test_data, request_staff):
perm = Django(action="add")
assert perm.check(request_staff, model=Entry) is False
assert perm.filter(request_staff, test_data).count() == 0
def test_django__superuser_can_access(test_data, request_superuser):
perm = Django(action="add")
assert perm.check(request_superuser, model=Entry) is True
assert perm.filter(request_superuser, test_data).count() == test_data.count()
@pytest.mark.django_db
def test_django__user_with_permission_can_access(
test_data, request_other, user_other, add_entry_permission
):
user_other.user_permissions.add(add_entry_permission)
perm = Django(action="add")
assert perm.check(request_other, model=Entry) is True
assert perm.filter(request_other, test_data).count() == test_data.count()
def test_owner__public_cannot_access(test_data, request_public):
perm = Owner(owner_field="author")
# Test data is ordered, the first is owned by user_owner
owned = test_data.first()
assert perm.check(request_public, instance=owned) is False
assert perm.filter(request_public, test_data).count() == 0
def test_owner__owner_can_access_theirs(test_data, request_owner, user_owner):
perm = Owner(owner_field="author")
owned = test_data.first()
assert perm.check(request_owner, instance=owned) is True
assert perm.filter(request_owner, test_data).count() == 2
assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 2
def test_owner__other_can_access_theirs(test_data, request_other, user_other):
perm = Owner(owner_field="author")
owned = test_data.first()
assert perm.check(request_other, instance=owned) is False
assert perm.filter(request_other, test_data).count() == 2
assert perm.filter(request_other, test_data).filter(author=user_other).count() == 2
def test_owner__staff_cannot_access(test_data, request_staff):
perm = Owner(owner_field="author")
owned = test_data.first()
assert perm.check(request_staff, instance=owned) is False
assert perm.filter(request_staff, test_data).count() == 0
def test_owner__superuser_cannot_access(test_data, request_superuser):
perm = Owner(owner_field="author")
owned = test_data.first()
assert perm.check(request_superuser, instance=owned) is False
assert perm.filter(request_superuser, test_data).count() == 0
def test_and__owner_and_staff__owner_cannot_access(test_data, request_owner):
perm = Owner(owner_field="author") & Staff()
owned = test_data.first()
assert perm.check(request_owner, instance=owned) is False
assert perm.filter(request_owner, test_data).count() == 0
def test_and__owner_and_staff__staff_cannot_access(test_data, request_staff):
perm = Owner(owner_field="author") & Staff()
owned = test_data.first()
assert perm.check(request_staff, instance=owned) is False
assert perm.filter(request_staff, test_data).count() == 0
def test_and__owner_and_staff__staff_owner_can_access(
test_data, request_owner, user_owner
):
perm = Owner(owner_field="author") & Staff()
owned = test_data.first()
user_owner.is_staff = True
user_owner.save()
assert perm.check(request_owner, instance=owned) is True
assert perm.filter(request_owner, test_data).count() == 2
def test_or__owner_or_staff__owner_can_access(test_data, request_owner):
perm = Owner(owner_field="author") | Staff()
owned = test_data.first()
assert perm.check(request_owner, instance=owned) is True
assert perm.filter(request_owner, test_data).count() == 2
def test_or__owner_or_staff__staff_can_access(test_data, request_staff):
perm = Owner(owner_field="author") | Staff()
owned = test_data.first()
assert perm.check(request_staff, instance=owned) is True
assert perm.filter(request_staff, test_data).count() == 4
def test_or__owner_or_staff__staff_owner_can_access(
test_data, request_owner, user_owner
):
perm = Owner(owner_field="author") | Staff()
owned = test_data.first()
user_owner.is_staff = True
user_owner.save()
assert perm.check(request_owner, instance=owned) is True
assert perm.filter(request_owner, test_data).count() == 4
def test_or__owner_or_staff__other_cannot_access(test_data, request_other, user_other):
perm = Owner(owner_field="author") | Staff()
owned = test_data.first()
assert perm.check(request_other, instance=owned) is False
assert perm.filter(request_other, test_data).count() == 2
assert perm.filter(request_other, test_data).filter(author=user_other).count() == 2
def test_not__not_owner__all_can_access_all_except_own(
test_data, request_owner, user_owner
):
perm = ~Owner(owner_field="author")
owned = test_data.first()
not_owned = test_data.exclude(author=user_owner).first()
assert perm.check(request_owner, instance=owned) is False
assert perm.check(request_owner, instance=not_owned) is True
assert perm.filter(request_owner, test_data).count() == 2
assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 0
def test_and_not__staff_not_owner__staff_can_access_all_except_own(
test_data, request_owner, user_owner
):
perm = Staff() & ~Owner(owner_field="author")
owned = test_data.first()
not_owned = test_data.exclude(author=user_owner).first()
user_owner.is_staff = True
user_owner.save()
assert perm.check(request_owner, instance=owned) is False
assert perm.check(request_owner, instance=not_owned) is True
assert perm.filter(request_owner, test_data).count() == 2
assert perm.filter(request_owner, test_data).filter(author=user_owner).count() == 0
|
[
"fastview.permissions.Superuser",
"fastview.permissions.Django",
"fastview.permissions.Public",
"fastview.permissions.Staff",
"fastview.permissions.Owner",
"fastview.permissions.Login"
] |
[((239, 247), 'fastview.permissions.Public', 'Public', ([], {}), '()\n', (245, 247), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((451, 458), 'fastview.permissions.Login', 'Login', ([], {}), '()\n', (456, 458), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((643, 650), 'fastview.permissions.Login', 'Login', ([], {}), '()\n', (648, 650), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((852, 859), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (857, 859), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((1047, 1054), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (1052, 1054), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((1236, 1243), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (1241, 1243), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((1449, 1460), 'fastview.permissions.Superuser', 'Superuser', ([], {}), '()\n', (1458, 1460), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((1652, 1663), 'fastview.permissions.Superuser', 'Superuser', ([], {}), '()\n', (1661, 1663), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((1852, 1863), 'fastview.permissions.Superuser', 'Superuser', ([], {}), '()\n', (1861, 1863), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((2057, 2068), 'fastview.permissions.Superuser', 'Superuser', ([], {}), '()\n', (2066, 2068), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((2279, 2299), 'fastview.permissions.Django', 'Django', ([], {'action': '"""add"""'}), "(action='add')\n", (2285, 2299), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((2501, 2521), 'fastview.permissions.Django', 'Django', ([], {'action': '"""add"""'}), "(action='add')\n", (2507, 2521), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((2720, 2740), 'fastview.permissions.Django', 'Django', ([], {'action': '"""add"""'}), "(action='add')\n", (2726, 2740), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((2944, 2964), 'fastview.permissions.Django', 'Django', ([], {'action': '"""add"""'}), "(action='add')\n", (2950, 2964), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((3319, 3339), 'fastview.permissions.Django', 'Django', ([], {'action': '"""add"""'}), "(action='add')\n", (3325, 3339), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((3554, 3581), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (3559, 3581), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((3891, 3918), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (3896, 3918), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((4252, 4279), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (4257, 4279), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((4598, 4625), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (4603, 4625), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((4864, 4891), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (4869, 4891), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5145, 5172), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (5150, 5172), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5175, 5182), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (5180, 5182), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5428, 5455), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (5433, 5455), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5458, 5465), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (5463, 5465), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5732, 5759), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (5737, 5759), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((5762, 5769), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (5767, 5769), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6062, 6089), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (6067, 6089), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6092, 6099), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (6097, 6099), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6339, 6366), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (6344, 6366), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6369, 6376), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (6374, 6376), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6640, 6667), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (6645, 6667), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6670, 6677), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (6675, 6677), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((6985, 7012), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (6990, 7012), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((7015, 7022), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (7020, 7022), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((7379, 7406), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (7384, 7406), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((7900, 7907), 'fastview.permissions.Staff', 'Staff', ([], {}), '()\n', (7905, 7907), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n'), ((7911, 7938), 'fastview.permissions.Owner', 'Owner', ([], {'owner_field': '"""author"""'}), "(owner_field='author')\n", (7916, 7938), False, 'from fastview.permissions import Django, Login, Owner, Public, Staff, Superuser\n')]
|
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.FName import FName
from UE4Parse.Versions.EUnrealEngineObjectUE4Version import UE4Versions
from UE4Parse.Assets.Objects.FGuid import FGuid
from Usmap import StructProps
from Usmap.Objects.FPropertyTag import FPropertyTag as UsmapTag
class FPropertyTag2:
def __init__(self, **kwargs) -> None:
for k,v in kwargs.items():
setattr(self, k, v)
class FPropertyTag:
ArrayIndex = 0
position = 0
BoolVal: int
EnumName: FName
EnumType: FName
HasPropertyGuid: bool = 0
InnerType: FName
Name: FName
PropertyGuid: FGuid
Size: int
SizeOffset: int
StructGuid: FGuid
StructName: FName
Type: FName
ValueType: FName
def __init__(self, reader: BinaryStream, propMappings: StructProps = None):
if propMappings:
propdata = propMappings.data
self.Name = FName(propMappings.Name)
self.ArrayIndex = propMappings.ArraySize
# data section
for attr in ["EnumName", "InnerType", "StructName", "ValueType", "Type"]:
val = getattr(propdata, attr, None)
if val is None:
continue
if attr == "InnerType":
self.InnerData = val #FPropertyTag2(**val)
elif attr == "ValueType":
self.ValueData = val #FPropertyTag2(val)
if isinstance(val, str):
val = FName(val)
if isinstance(val, UsmapTag):
val = FName(val.Type)
setattr(self, attr, val)
return
self.Name = reader.readFName()
if self.Name.isNone:
return
self.Type = reader.readFName()
self.Size = reader.readInt32()
self.ArrayIndex = reader.readInt32()
self.position = reader.base_stream.tell()
if self.Type.Number == 0:
Type = self.Type.string
if Type == "StructProperty":
self.StructName = reader.readFName()
if reader.version >= UE4Versions.VER_UE4_STRUCT_GUID_IN_PROPERTY_TAG:
self.StructGuid = FGuid(reader)
elif Type == "BoolProperty":
self.BoolVal = reader.readByteToInt()
elif Type == "ByteProperty" or Type == "EnumProperty":
self.EnumName = reader.readFName()
elif Type == "ArrayProperty":
if reader.version >= UE4Versions.VAR_UE4_ARRAY_PROPERTY_INNER_TAGS:
self.InnerType = reader.readFName()
elif Type == "SetProperty":
if reader.version >= UE4Versions.VER_UE4_PROPERTY_TAG_SET_MAP_SUPPORT:
self.InnerType = reader.readFName()
elif Type == "MapProperty":
if reader.version >= UE4Versions.VER_UE4_PROPERTY_TAG_SET_MAP_SUPPORT:
self.InnerType = reader.readFName()
self.ValueType = reader.readFName()
HasPropertyGuid = reader.readByteToInt()
if HasPropertyGuid != 0:
FGuid(reader)
self.end_pos = reader.tell()
def __repr__(self):
return f"<{self.Name.string} : {self.Type.string}>"
|
[
"UE4Parse.Assets.Objects.FName.FName",
"UE4Parse.Assets.Objects.FGuid.FGuid"
] |
[((933, 957), 'UE4Parse.Assets.Objects.FName.FName', 'FName', (['propMappings.Name'], {}), '(propMappings.Name)\n', (938, 957), False, 'from UE4Parse.Assets.Objects.FName import FName\n'), ((3161, 3174), 'UE4Parse.Assets.Objects.FGuid.FGuid', 'FGuid', (['reader'], {}), '(reader)\n', (3166, 3174), False, 'from UE4Parse.Assets.Objects.FGuid import FGuid\n'), ((1525, 1535), 'UE4Parse.Assets.Objects.FName.FName', 'FName', (['val'], {}), '(val)\n', (1530, 1535), False, 'from UE4Parse.Assets.Objects.FName import FName\n'), ((1608, 1623), 'UE4Parse.Assets.Objects.FName.FName', 'FName', (['val.Type'], {}), '(val.Type)\n', (1613, 1623), False, 'from UE4Parse.Assets.Objects.FName import FName\n'), ((2235, 2248), 'UE4Parse.Assets.Objects.FGuid.FGuid', 'FGuid', (['reader'], {}), '(reader)\n', (2240, 2248), False, 'from UE4Parse.Assets.Objects.FGuid import FGuid\n')]
|
import sys
import random
import pygame
def create_player_cards():
# 创建卡片信息,player
_card = [x for x in range(13)]
cards = []
player = [[], [], [], []]
# 单副牌(除去大小王)
for x in range(4):
color = list(map(lambda n: (n, x), _card))
cards = cards + color
# 再加一副牌
cards = cards * 2
# 洗牌
count = 0
random.shuffle(cards)
# 发牌
for ct in cards:
player[count % 4].append(ct)
count += 1
return player
def sort_by_card(_card):
n, _ = _card
if n <= 1:
n += 13
return n
'''--------------main-----------------'''
# 初始化显示
pygame.init()
size = width, height = 1280, 720
black = 0, 0, 0
screen = pygame.display.set_mode(size)
# 载入牌面
card_colors = ('k', 'l', 'p', 's') # 花色
card_images = [[], [], [], []]
for c in range(4):
for i in range(1, 14):
img = pygame.image.load(f"img/{card_colors[c]}{i}.png")
card_images[c].append(img) # 载入所有牌面
players_cards = create_player_cards()
l_count = 0
for li in range(4):
r_count = 0
players_cards[li].sort(key=sort_by_card)
for c in players_cards[li]:
card, c_colors = c
screen.blit(card_images[c_colors][card], (150 + r_count, 50 + l_count))
pygame.time.wait(10)
pygame.display.flip()
r_count += 30
l_count += 100
# 主循环
while 1:
# 处理退出
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
|
[
"pygame.event.get",
"pygame.display.set_mode",
"random.shuffle",
"pygame.init",
"pygame.display.flip",
"pygame.time.wait",
"pygame.image.load",
"sys.exit"
] |
[((649, 662), 'pygame.init', 'pygame.init', ([], {}), '()\n', (660, 662), False, 'import pygame\n'), ((724, 753), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (747, 753), False, 'import pygame\n'), ((366, 387), 'random.shuffle', 'random.shuffle', (['cards'], {}), '(cards)\n', (380, 387), False, 'import random\n'), ((1432, 1450), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1448, 1450), False, 'import pygame\n'), ((901, 950), 'pygame.image.load', 'pygame.image.load', (['f"""img/{card_colors[c]}{i}.png"""'], {}), "(f'img/{card_colors[c]}{i}.png')\n", (918, 950), False, 'import pygame\n'), ((1288, 1308), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (1304, 1308), False, 'import pygame\n'), ((1318, 1339), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1337, 1339), False, 'import pygame\n'), ((1504, 1514), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1512, 1514), False, 'import sys\n')]
|
"""
Scatterplot in one dimension only
"""
from __future__ import absolute_import
from numpy import empty
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, MarkerTrait
from traits.api import Any, Bool, Callable, Enum, Float, Str
# local imports
from .base_1d_plot import Base1DPlot
from .scatterplot import render_markers
class ScatterPlot1D(Base1DPlot):
""" A scatterplot that in 1D """
# The type of marker to use. This is a mapped trait using strings as the
# keys.
marker = MarkerTrait
# The pixel size of the marker, not including the thickness of the outline.
marker_size = Float(4.0)
# The CompiledPath to use if **marker** is set to "custom". This attribute
# must be a compiled path for the Kiva context onto which this plot will
# be rendered. Usually, importing kiva.GraphicsContext will do
# the right thing.
custom_symbol = Any
# The function which actually renders the markers
render_markers_func = Callable(render_markers)
# The thickness, in pixels, of the outline to draw around the marker. If
# this is 0, no outline is drawn.
line_width = Float(1.0)
# The fill color of the marker.
color = black_color_trait
# The color of the outline to draw around the marker.
outline_color = black_color_trait
#------------------------------------------------------------------------
# Selection and selection rendering
# A selection on the lot is indicated by setting the index or value
# datasource's 'selections' metadata item to a list of indices, or the
# 'selection_mask' metadata to a boolean array of the same length as the
# datasource.
#------------------------------------------------------------------------
#: the plot data metadata name to watch for selection information
selection_metadata_name = Str("selections")
#: whether or not to display a selection
show_selection = Bool(True)
#: the marker type for selected points
selection_marker = MarkerTrait
#: the marker size for selected points
selection_marker_size = Float(4.0)
#: the thickness, in pixels, of the selected points
selection_line_width = Float(1.0)
#: the color of the selected points
selection_color = ColorTrait("yellow")
#: the outline color of the selected points
selection_outline_color = black_color_trait
#: The fade amount for unselected regions
unselected_alpha = Float(0.3)
#: The marker outline width to use for unselected points
unselected_line_width = Float(1.0)
#: alignment of markers relative to non-index direction
alignment = Enum("center", "left", "right", "top", "bottom")
#: offset of markers relative to non-index direction in pixels
marker_offset = Float
#: private trait holding postion of markers relative to non-index direction
_marker_position = Float
def _draw_plot(self, gc, view_bounds=None, mode="normal"):
coord = self._compute_screen_coord()
pts = empty(shape=(len(coord), 2))
if self.orientation == 'v':
pts[:, 1] = coord
pts[:, 0] = self._marker_position
else:
pts[:, 0] = coord
pts[:, 1] = self._marker_position
self._render(gc, pts)
def _render(self, gc, pts):
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
if not self.index:
return
name = self.selection_metadata_name
md = self.index.metadata
if name in md and md[name] is not None and len(md[name]) > 0:
selected_mask = md[name][0]
selected_pts = pts[selected_mask]
unselected_pts = pts[~selected_mask]
color = list(self.color_)
color[3] *= self.unselected_alpha
outline_color = list(self.outline_color_)
outline_color[3] *= self.unselected_alpha
if unselected_pts.size > 0:
self.render_markers_func(gc, unselected_pts, self.marker,
self.marker_size, tuple(color),
self.unselected_line_width, tuple(outline_color),
self.custom_symbol)
if selected_pts.size > 0:
self.render_markers_func(gc, selected_pts, self.marker,
self.marker_size, self.selection_color_,
self.line_width, self.outline_color_,
self.custom_symbol)
else:
self.render_markers_func(gc, pts, self.marker,
self.marker_size, self.color_, self.line_width,
self.outline_color_, self.custom_symbol)
def __marker_positon_default(self):
return self._get_marker_position()
def _get_marker_position(self):
x, y = self.position
w, h = self.bounds
if self.orientation == 'v':
y, h = x, w
if self.alignment == 'center':
position = y + h/2.0
elif self.alignment in ['left', 'bottom']:
position = y
elif self.alignment in ['right', 'top']:
position = y + h
position += self.marker_offset
return position
def _bounds_changed(self, old, new):
super(ScatterPlot1D, self)._bounds_changed(old, new)
self._marker_position = self._get_marker_position()
def _bounds_items_changed(self, event):
super(ScatterPlot1D, self)._bounds_items_changed(event)
self._marker_position = self._get_marker_position()
def _orientation_changed(self):
super(ScatterPlot1D, self)._orientation_changed()
self._marker_position = self._get_marker_position()
def _alignment_changed(self):
self._marker_position = self._get_marker_position()
|
[
"traits.api.Float",
"traits.api.Callable",
"traits.api.Bool",
"enable.api.ColorTrait",
"traits.api.Str",
"traits.api.Enum"
] |
[((645, 655), 'traits.api.Float', 'Float', (['(4.0)'], {}), '(4.0)\n', (650, 655), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((1009, 1033), 'traits.api.Callable', 'Callable', (['render_markers'], {}), '(render_markers)\n', (1017, 1033), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((1168, 1178), 'traits.api.Float', 'Float', (['(1.0)'], {}), '(1.0)\n', (1173, 1178), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((1883, 1900), 'traits.api.Str', 'Str', (['"""selections"""'], {}), "('selections')\n", (1886, 1900), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((1968, 1978), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (1972, 1978), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((2130, 2140), 'traits.api.Float', 'Float', (['(4.0)'], {}), '(4.0)\n', (2135, 2140), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((2225, 2235), 'traits.api.Float', 'Float', (['(1.0)'], {}), '(1.0)\n', (2230, 2235), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((2299, 2319), 'enable.api.ColorTrait', 'ColorTrait', (['"""yellow"""'], {}), "('yellow')\n", (2309, 2319), False, 'from enable.api import black_color_trait, ColorTrait, MarkerTrait\n'), ((2487, 2497), 'traits.api.Float', 'Float', (['(0.3)'], {}), '(0.3)\n', (2492, 2497), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((2588, 2598), 'traits.api.Float', 'Float', (['(1.0)'], {}), '(1.0)\n', (2593, 2598), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n'), ((2676, 2724), 'traits.api.Enum', 'Enum', (['"""center"""', '"""left"""', '"""right"""', '"""top"""', '"""bottom"""'], {}), "('center', 'left', 'right', 'top', 'bottom')\n", (2680, 2724), False, 'from traits.api import Any, Bool, Callable, Enum, Float, Str\n')]
|
from django.forms import ModelForm
from .models import contact
from django import forms
class ContactForm(ModelForm):
class Meta:
model = contact
fields = ['name', 'email', 'relation']
Father = 'Father'
Mother = 'Mother'
Brother = 'Brother'
Sister = 'Sister'
Husband = 'Husband'
Friend = 'Friend'
Relative = 'Relative'
Other = 'Other'
relations = (
(Father, 'Father'),
(Mother, 'Mother'),
(Brother, 'Brother'),
(Sister, 'Sister'),
(Husband, 'Husband'),
(Friend, 'Friend'),
(Relative, 'Relative'),
(Other, 'Other'),
)
widgets = {
'relation': forms.Select(choices=relations, attrs={'class': 'form-control'}),
}
|
[
"django.forms.Select"
] |
[((760, 824), 'django.forms.Select', 'forms.Select', ([], {'choices': 'relations', 'attrs': "{'class': 'form-control'}"}), "(choices=relations, attrs={'class': 'form-control'})\n", (772, 824), False, 'from django import forms\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('slug', models.CharField(max_length=150)),
('priority', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('description', models.TextField()),
('requirements', models.TextField()),
('time_commitment', models.TextField()),
('benefits', models.TextField()),
('location', models.CharField(max_length=150)),
('title', models.CharField(max_length=150)),
('started_at', models.DateTimeField(blank=True, null=True)),
('finished_at', models.DateTimeField(blank=True, null=True)),
('time_period', models.CharField(blank=True, default='', max_length=150)),
('status_old', models.CharField(default='NEW', max_length=30, null=True)),
('offer_status', models.CharField(default='unpublished', choices=[('unpublished', 'Unpublished'), ('published', 'Published'), ('rejected', 'Rejected')], max_length=16)),
('recruitment_status', models.CharField(default='open', choices=[('open', 'Open'), ('supplemental', 'Supplemental'), ('closed', 'Closed')], max_length=16)),
('action_status', models.CharField(default='ongoing', choices=[('future', 'Future'), ('ongoing', 'Ongoing'), ('finished', 'Finished')], max_length=16)),
('votes', models.BooleanField(default=0)),
('recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment', models.BooleanField(default=True)),
('reserve_recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('action_ongoing', models.BooleanField(default=False)),
('constant_coop', models.BooleanField(default=False)),
('action_start_date', models.DateTimeField(blank=True, null=True)),
('action_end_date', models.DateTimeField(blank=True, null=True)),
('volunteers_limit', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='OfferImage',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='offers/')),
('is_main', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('offer', models.ForeignKey(to='volontulo.Offer')),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('address', models.CharField(max_length=150)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='OrganizationGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='gallery/')),
('is_main', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='volontulo.Organization', related_name='images')),
],
),
migrations.CreateModel(
name='UserBadges',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('description', models.CharField(max_length=255)),
('counter', models.IntegerField(blank=True, default=0)),
('badge', models.ForeignKey(to='volontulo.Badge')),
('content_type', models.ForeignKey(null=True, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='UserGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('image', models.ImageField(upload_to='profile/')),
('is_avatar', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('is_administrator', models.BooleanField(default=False)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('badges', models.ManyToManyField(to='volontulo.Badge', through='volontulo.UserBadges', related_name='user_profile')),
('organizations', models.ManyToManyField(to='volontulo.Organization', related_name='userprofiles')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='usergallery',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='images'),
),
migrations.AddField(
model_name='userbadges',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', db_column='userprofile_id'),
),
migrations.AddField(
model_name='organizationgallery',
name='published_by',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='gallery'),
),
migrations.AddField(
model_name='offerimage',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='offerimages'),
),
migrations.AddField(
model_name='offer',
name='organization',
field=models.ForeignKey(to='volontulo.Organization'),
),
migrations.AddField(
model_name='offer',
name='volunteers',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.UUIDField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((251, 308), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (282, 308), False, 'from django.db import models, migrations\n'), ((6310, 6378), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.UserProfile"""', 'related_name': '"""images"""'}), "(to='volontulo.UserProfile', related_name='images')\n", (6327, 6378), False, 'from django.db import models, migrations\n'), ((6507, 6580), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.UserProfile"""', 'db_column': '"""userprofile_id"""'}), "(to='volontulo.UserProfile', db_column='userprofile_id')\n", (6524, 6580), False, 'from django.db import models, migrations\n'), ((6719, 6788), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.UserProfile"""', 'related_name': '"""gallery"""'}), "(to='volontulo.UserProfile', related_name='gallery')\n", (6736, 6788), False, 'from django.db import models, migrations\n'), ((6917, 6990), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.UserProfile"""', 'related_name': '"""offerimages"""'}), "(to='volontulo.UserProfile', related_name='offerimages')\n", (6934, 6990), False, 'from django.db import models, migrations\n'), ((7115, 7161), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.Organization"""'}), "(to='volontulo.Organization')\n", (7132, 7161), False, 'from django.db import models, migrations\n'), ((7284, 7335), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (7306, 7335), False, 'from django.db import models, migrations\n'), ((497, 590), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (513, 590), False, 'from django.db import models, migrations\n'), ((614, 646), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (630, 646), False, 'from django.db import models, migrations\n'), ((674, 706), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (690, 706), False, 'from django.db import models, migrations\n'), ((738, 768), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (757, 768), False, 'from django.db import models, migrations\n'), ((899, 992), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (915, 992), False, 'from django.db import models, migrations\n'), ((1023, 1041), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1039, 1041), False, 'from django.db import models, migrations\n'), ((1077, 1095), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1093, 1095), False, 'from django.db import models, migrations\n'), ((1134, 1152), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1150, 1152), False, 'from django.db import models, migrations\n'), ((1184, 1202), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1200, 1202), False, 'from django.db import models, migrations\n'), ((1234, 1266), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1250, 1266), False, 'from django.db import models, migrations\n'), ((1295, 1327), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1311, 1327), False, 'from django.db import models, migrations\n'), ((1361, 1404), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1381, 1404), False, 'from django.db import models, migrations\n'), ((1439, 1482), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1459, 1482), False, 'from django.db import models, migrations\n'), ((1517, 1573), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(150)'}), "(blank=True, default='', max_length=150)\n", (1533, 1573), False, 'from django.db import models, migrations\n'), ((1607, 1664), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""NEW"""', 'max_length': '(30)', 'null': '(True)'}), "(default='NEW', max_length=30, null=True)\n", (1623, 1664), False, 'from django.db import models, migrations\n'), ((1700, 1858), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""unpublished"""', 'choices': "[('unpublished', 'Unpublished'), ('published', 'Published'), ('rejected',\n 'Rejected')]", 'max_length': '(16)'}), "(default='unpublished', choices=[('unpublished',\n 'Unpublished'), ('published', 'Published'), ('rejected', 'Rejected')],\n max_length=16)\n", (1716, 1858), False, 'from django.db import models, migrations\n'), ((1892, 2027), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""open"""', 'choices': "[('open', 'Open'), ('supplemental', 'Supplemental'), ('closed', 'Closed')]", 'max_length': '(16)'}), "(default='open', choices=[('open', 'Open'), ('supplemental',\n 'Supplemental'), ('closed', 'Closed')], max_length=16)\n", (1908, 2027), False, 'from django.db import models, migrations\n'), ((2060, 2197), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""ongoing"""', 'choices': "[('future', 'Future'), ('ongoing', 'Ongoing'), ('finished', 'Finished')]", 'max_length': '(16)'}), "(default='ongoing', choices=[('future', 'Future'), (\n 'ongoing', 'Ongoing'), ('finished', 'Finished')], max_length=16)\n", (2076, 2197), False, 'from django.db import models, migrations\n'), ((2221, 2251), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (2240, 2251), False, 'from django.db import models, migrations\n'), ((2297, 2340), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2317, 2340), False, 'from django.db import models, migrations\n'), ((2384, 2427), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2404, 2427), False, 'from django.db import models, migrations\n'), ((2470, 2503), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2489, 2503), False, 'from django.db import models, migrations\n'), ((2557, 2600), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2577, 2600), False, 'from django.db import models, migrations\n'), ((2652, 2695), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2672, 2695), False, 'from django.db import models, migrations\n'), ((2733, 2767), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2752, 2767), False, 'from django.db import models, migrations\n'), ((2804, 2838), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2823, 2838), False, 'from django.db import models, migrations\n'), ((2879, 2922), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2899, 2922), False, 'from django.db import models, migrations\n'), ((2961, 3004), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2981, 3004), False, 'from django.db import models, migrations\n'), ((3044, 3097), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(0)', 'null': '(True)'}), '(blank=True, default=0, null=True)\n', (3063, 3097), False, 'from django.db import models, migrations\n'), ((3233, 3326), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (3249, 3326), False, 'from django.db import models, migrations\n'), ((3350, 3388), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""offers/"""'}), "(upload_to='offers/')\n", (3367, 3388), False, 'from django.db import models, migrations\n'), ((3419, 3453), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (3438, 3453), False, 'from django.db import models, migrations\n'), ((3487, 3526), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3507, 3526), False, 'from django.db import models, migrations\n'), ((3555, 3594), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.Offer"""'}), "(to='volontulo.Offer')\n", (3572, 3594), False, 'from django.db import models, migrations\n'), ((3732, 3825), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (3748, 3825), False, 'from django.db import models, migrations\n'), ((3849, 3881), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (3865, 3881), False, 'from django.db import models, migrations\n'), ((3912, 3944), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (3928, 3944), False, 'from django.db import models, migrations\n'), ((3979, 3997), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3995, 3997), False, 'from django.db import models, migrations\n'), ((4142, 4235), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (4158, 4235), False, 'from django.db import models, migrations\n'), ((4259, 4298), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""gallery/"""'}), "(upload_to='gallery/')\n", (4276, 4298), False, 'from django.db import models, migrations\n'), ((4329, 4363), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4348, 4363), False, 'from django.db import models, migrations\n'), ((4399, 4468), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.Organization"""', 'related_name': '"""images"""'}), "(to='volontulo.Organization', related_name='images')\n", (4416, 4468), False, 'from django.db import models, migrations\n'), ((4604, 4697), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (4620, 4697), False, 'from django.db import models, migrations\n'), ((4727, 4794), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'default': 'django.utils.timezone.now'}), '(blank=True, default=django.utils.timezone.now)\n', (4747, 4794), False, 'from django.db import models, migrations\n'), ((4829, 4861), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4845, 4861), False, 'from django.db import models, migrations\n'), ((4892, 4934), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(0)'}), '(blank=True, default=0)\n', (4911, 4934), False, 'from django.db import models, migrations\n'), ((4963, 5002), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""volontulo.Badge"""'}), "(to='volontulo.Badge')\n", (4980, 5002), False, 'from django.db import models, migrations\n'), ((5038, 5097), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'to': '"""contenttypes.ContentType"""'}), "(null=True, to='contenttypes.ContentType')\n", (5055, 5097), False, 'from django.db import models, migrations\n'), ((5234, 5327), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (5250, 5327), False, 'from django.db import models, migrations\n'), ((5352, 5391), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""profile/"""'}), "(upload_to='profile/')\n", (5369, 5391), False, 'from django.db import models, migrations\n'), ((5424, 5458), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5443, 5458), False, 'from django.db import models, migrations\n'), ((5595, 5688), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)'}), "(verbose_name='ID', auto_created=True, primary_key=True,\n serialize=False)\n", (5611, 5688), False, 'from django.db import models, migrations\n'), ((5724, 5758), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5743, 5758), False, 'from django.db import models, migrations\n'), ((5786, 5835), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'unique': '(True)'}), '(default=uuid.uuid4, unique=True)\n', (5802, 5835), False, 'from django.db import models, migrations\n'), ((5865, 5974), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""volontulo.Badge"""', 'through': '"""volontulo.UserBadges"""', 'related_name': '"""user_profile"""'}), "(to='volontulo.Badge', through='volontulo.UserBadges',\n related_name='user_profile')\n", (5887, 5974), False, 'from django.db import models, migrations\n'), ((6007, 6092), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""volontulo.Organization"""', 'related_name': '"""userprofiles"""'}), "(to='volontulo.Organization', related_name='userprofiles'\n )\n", (6029, 6092), False, 'from django.db import models, migrations\n'), ((6115, 6164), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'to': 'settings.AUTH_USER_MODEL'}), '(to=settings.AUTH_USER_MODEL)\n', (6135, 6164), False, 'from django.db import models, migrations\n')]
|
"""Support for Elgato button."""
from __future__ import annotations
import logging
from elgato import Elgato, ElgatoError, Info
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantElgatoData
from .const import DOMAIN
from .entity import ElgatoEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Elgato button based on a config entry."""
data: HomeAssistantElgatoData = hass.data[DOMAIN][entry.entry_id]
async_add_entities([ElgatoIdentifyButton(data.client, data.info)])
class ElgatoIdentifyButton(ElgatoEntity, ButtonEntity):
"""Defines an Elgato identify button."""
def __init__(self, client: Elgato, info: Info) -> None:
"""Initialize the button entity."""
super().__init__(client, info)
self.entity_description = ButtonEntityDescription(
key="identify",
name="Identify",
icon="mdi:help",
entity_category=EntityCategory.CONFIG,
)
self._attr_unique_id = f"{info.serial_number}_{self.entity_description.key}"
async def async_press(self) -> None:
"""Identify the light, will make it blink."""
try:
await self.client.identify()
except ElgatoError:
_LOGGER.exception("An error occurred while identifying the Elgato Light")
|
[
"homeassistant.components.button.ButtonEntityDescription",
"logging.getLogger"
] |
[((546, 573), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (563, 573), False, 'import logging\n'), ((1188, 1304), 'homeassistant.components.button.ButtonEntityDescription', 'ButtonEntityDescription', ([], {'key': '"""identify"""', 'name': '"""Identify"""', 'icon': '"""mdi:help"""', 'entity_category': 'EntityCategory.CONFIG'}), "(key='identify', name='Identify', icon='mdi:help',\n entity_category=EntityCategory.CONFIG)\n", (1211, 1304), False, 'from homeassistant.components.button import ButtonEntity, ButtonEntityDescription\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
=================
TorrentWindow - a basic GUI for BitTorrent
=================
This component supports downloading from multiple torrents simultaneously
but no deletion or statistics other than percentage completuion so far.
How does it work?
-----------------
TorrentWindow uses Tkinter to produce a very simple GUI.
It then produces messages for and accepts messages produced by a
TorrentPatron component (also would work with TorrentClient but
TorrentPatron is preferred, see their respective files).
Example Usage
-------------
The following setup allows torrents to be entered as HTTP URLs into the
GUI and then downloaded with progress information for each torrent.
Graphline(
gui=TorrentWindow(),
httpclient=SimpleHTTPClient(),
backend=TorrentPatron(),
linkages = {
("gui", "outbox") : ("backend", "inbox"),
("gui", "fetchersignal") : ("httpclient", "control"),
("gui", "signal") : ("backend", "control"),
("gui", "fetcher") : ("httpclient", "inbox"),
("httpclient", "outbox") : ("backend", "inbox"),
("backend", "outbox"): ("gui", "inbox")
}
).run()
"""
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Axon.Ipc import producerFinished, shutdown
import Tkinter, time
from TorrentPatron import TorrentPatron
from TorrentIPC import *
class TorrentWindow(TkWindow):
Inboxes = {
"inbox" : "From TorrentPatron backend",
"control" : "Tell me to shutdown",
}
Outboxes = {
"outbox" : "To TorrentPatron backend",
"fetcher" : "To TorrentPatron backend via a resource fetcher, e.g. file reader or HTTP client",
"fetchersignal" : "Shutdown resource fetcher",
"signal" : "When I've shutdown"
}
def __init__(self):
self.pendingtorrents = []
self.torrents = {}
super(TorrentWindow, self).__init__()
def setupWindow(self):
"Create the GUI controls and window for this application"
self.entry = Tkinter.Entry(self.window)
self.addtorrentbutton = Tkinter.Button(self.window, text="Add Torrent", command=self.addTorrent)
self.window.title("Kamaelia BitTorrent Client")
self.entry.grid(row=0, column=0, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.addtorrentbutton.grid(row=0, column=1, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.window.rowconfigure(0, weight=1)
self.window.columnconfigure(0, weight=3)
self.window.columnconfigure(1, weight=1)
def addTorrent(self):
"Request the addition of a new torrent"
torrenturl = self.entry.get()
self.pendingtorrents.append(torrenturl.rsplit("/", 1)[-1])
self.send(torrenturl, "fetcher") # forward on the torrent URL/path to the fetcher
self.entry.delete(0, Tkinter.END)
def main(self):
while not self.isDestroyed():
time.sleep(0.05) # reduces CPU usage but a timer component would be better
yield 1
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
self.send(msg, "signal")
self.window.destroy()
if self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, TIPCNewTorrentCreated):
torrentname = self.pendingtorrents.pop(0)
labeltext = Tkinter.StringVar() # allow us to change the label's text on the fly
newlabel = Tkinter.Label(self.window, textvariable=labeltext)
self.torrents[msg.torrentid] = (torrentname, newlabel, labeltext)
labeltext.set(torrentname + " - 0%")
newlabel.grid(row=len(self.torrents), column=0, columnspan=2, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.window.rowconfigure(len(self.torrents), weight=1)
elif isinstance(msg, TIPCTorrentStartFail) or isinstance(msg, TIPCTorrentAlreadyDownloading):
self.pendingtorrents.pop(0) # the oldest torrent not yet started failed so remove it from the list of pending torrents
elif isinstance(msg, TIPCTorrentStatusUpdate):
# print msg.statsdictionary.get("fractionDone","-1")
self.torrents[msg.torrentid][2].set(self.torrents[msg.torrentid][0] + " - " + str(int(msg.statsdictionary.get("fractionDone","0") * 100)) + "%")
self.tkupdate()
self.send(shutdown(), "signal")
self.send(shutdown(), "fetchersignal")
if __name__ == "__main__":
from Kamaelia.Chassis.Graphline import Graphline
import sys
sys.path.append("../HTTP")
from HTTPClient import SimpleHTTPClient
Graphline(
gui=TorrentWindow(),
httpclient=SimpleHTTPClient(),
backend=TorrentPatron(),
linkages = {
("gui", "outbox") : ("backend", "inbox"),
("gui", "fetchersignal") : ("httpclient", "control"),
("gui", "signal") : ("backend", "control"),
("gui", "fetcher") : ("httpclient", "inbox"),
("httpclient", "outbox") : ("backend", "inbox"),
("backend", "outbox"): ("gui", "inbox")
}
).run()
|
[
"sys.path.append",
"TorrentPatron.TorrentPatron",
"HTTPClient.SimpleHTTPClient",
"Tkinter.Label",
"time.sleep",
"Tkinter.StringVar",
"Tkinter.Entry",
"Tkinter.Button",
"Axon.Ipc.shutdown"
] |
[((5738, 5764), 'sys.path.append', 'sys.path.append', (['"""../HTTP"""'], {}), "('../HTTP')\n", (5753, 5764), False, 'import sys\n'), ((2860, 2886), 'Tkinter.Entry', 'Tkinter.Entry', (['self.window'], {}), '(self.window)\n', (2873, 2886), False, 'import Tkinter, time\n'), ((2919, 2991), 'Tkinter.Button', 'Tkinter.Button', (['self.window'], {'text': '"""Add Torrent"""', 'command': 'self.addTorrent'}), "(self.window, text='Add Torrent', command=self.addTorrent)\n", (2933, 2991), False, 'import Tkinter, time\n'), ((3781, 3797), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3791, 3797), False, 'import Tkinter, time\n'), ((5552, 5562), 'Axon.Ipc.shutdown', 'shutdown', ([], {}), '()\n', (5560, 5562), False, 'from Axon.Ipc import producerFinished, shutdown\n'), ((5593, 5603), 'Axon.Ipc.shutdown', 'shutdown', ([], {}), '()\n', (5601, 5603), False, 'from Axon.Ipc import producerFinished, shutdown\n'), ((4365, 4384), 'Tkinter.StringVar', 'Tkinter.StringVar', ([], {}), '()\n', (4382, 4384), False, 'import Tkinter, time\n'), ((4465, 4515), 'Tkinter.Label', 'Tkinter.Label', (['self.window'], {'textvariable': 'labeltext'}), '(self.window, textvariable=labeltext)\n', (4478, 4515), False, 'import Tkinter, time\n'), ((5878, 5896), 'HTTPClient.SimpleHTTPClient', 'SimpleHTTPClient', ([], {}), '()\n', (5894, 5896), False, 'from HTTPClient import SimpleHTTPClient\n'), ((5914, 5929), 'TorrentPatron.TorrentPatron', 'TorrentPatron', ([], {}), '()\n', (5927, 5929), False, 'from TorrentPatron import TorrentPatron\n')]
|
from appi2c.ext.database import db
from appi2c.ext.icon.icon_models import Icon
def list_all_icon():
icon = Icon.query.all()
return icon
def list_icon_id(id: int) -> Icon:
icon = Icon.query.filter_by(id=id).first()
return icon
def create_icon(html_class: str):
icon = Icon(html_class=html_class)
db.session.add(icon)
db.session.commit()
def update_icon(id: int, html_class: str):
Icon.query.filter_by(id=id).update(dict(html_class=html_class))
db.session.commit()
def list_icon_in_device(devices: list):
if devices is not None:
list_icon = []
for device in devices:
icon = Icon.query.filter_by(id=device.icon_id).first()
list_icon.append(icon.html_class)
return list_icon
return False
|
[
"appi2c.ext.icon.icon_models.Icon",
"appi2c.ext.icon.icon_models.Icon.query.filter_by",
"appi2c.ext.icon.icon_models.Icon.query.all",
"appi2c.ext.database.db.session.commit",
"appi2c.ext.database.db.session.add"
] |
[((114, 130), 'appi2c.ext.icon.icon_models.Icon.query.all', 'Icon.query.all', ([], {}), '()\n', (128, 130), False, 'from appi2c.ext.icon.icon_models import Icon\n'), ((294, 321), 'appi2c.ext.icon.icon_models.Icon', 'Icon', ([], {'html_class': 'html_class'}), '(html_class=html_class)\n', (298, 321), False, 'from appi2c.ext.icon.icon_models import Icon\n'), ((326, 346), 'appi2c.ext.database.db.session.add', 'db.session.add', (['icon'], {}), '(icon)\n', (340, 346), False, 'from appi2c.ext.database import db\n'), ((351, 370), 'appi2c.ext.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (368, 370), False, 'from appi2c.ext.database import db\n'), ((488, 507), 'appi2c.ext.database.db.session.commit', 'db.session.commit', ([], {}), '()\n', (505, 507), False, 'from appi2c.ext.database import db\n'), ((195, 222), 'appi2c.ext.icon.icon_models.Icon.query.filter_by', 'Icon.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (215, 222), False, 'from appi2c.ext.icon.icon_models import Icon\n'), ((420, 447), 'appi2c.ext.icon.icon_models.Icon.query.filter_by', 'Icon.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (440, 447), False, 'from appi2c.ext.icon.icon_models import Icon\n'), ((651, 690), 'appi2c.ext.icon.icon_models.Icon.query.filter_by', 'Icon.query.filter_by', ([], {'id': 'device.icon_id'}), '(id=device.icon_id)\n', (671, 690), False, 'from appi2c.ext.icon.icon_models import Icon\n')]
|
import gym, yumi_gym
import pybullet as p
env = gym.make('yumi-v0')
env.render()
observation = env.reset()
motorsIds = []
for joint in env.joints:
motorsIds.append(p.addUserDebugParameter(joint, -1, 1, 0))
while True:
env.render()
action = []
for motorId in motorsIds:
action.append(p.readUserDebugParameter(motorId))
observation, reward, done, info = env.step(action)
|
[
"pybullet.readUserDebugParameter",
"gym.make",
"pybullet.addUserDebugParameter"
] |
[((49, 68), 'gym.make', 'gym.make', (['"""yumi-v0"""'], {}), "('yumi-v0')\n", (57, 68), False, 'import gym, yumi_gym\n'), ((170, 210), 'pybullet.addUserDebugParameter', 'p.addUserDebugParameter', (['joint', '(-1)', '(1)', '(0)'], {}), '(joint, -1, 1, 0)\n', (193, 210), True, 'import pybullet as p\n'), ((311, 344), 'pybullet.readUserDebugParameter', 'p.readUserDebugParameter', (['motorId'], {}), '(motorId)\n', (335, 344), True, 'import pybullet as p\n')]
|
import argparse
import pandas as pd
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
from pathlib import Path
import json
import librosa
from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data
# Filter out performances shorter than ```MIN_DURATION``` secs
MIN_DURATION = 15.0
# Filter out songs with mixtures shorter than vocal in %
# These are errors in the dataset
DURATION_VAR = 0.95
# Framing parameters for RMS
NHOP = 0.010
WIN = 0.025
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, required=True,
help='Dataset to process')
parser.add_argument('--root_path', type=str, required=True,
help='Root path to DAMP-VSEP')
parser.add_argument('--sample_rate', type=int, required=True,
default=16000)
parser.add_argument('--output_meta_path', type=str, required=True,
help='Path where save the metadata')
def main(args):
metadata_path = Path(args.output_meta_path)
track_list = pd.read_csv(f"split/{args.split}.csv")
metadata = []
pool = Pool(processes=cpu_count())
track_inputs = [(t, Path(args.root_path), args.sample_rate)
for i, t in track_list.iterrows()]
for meta in tqdm(pool.imap_unordered(build_metadata, track_inputs),
total=len(track_inputs)):
if meta:
metadata.append(meta)
tracks = {p: m for p, m in metadata}
metadata_path.mkdir(parents=True, exist_ok=True)
json.dump(tracks, open(metadata_path / f"{args.split}_sr{args.sample_rate}.json",
'w'),
indent=2)
def build_metadata(inputs):
track, root, sample_rate = inputs
hop_length = int(sample_rate * NHOP)
frame_length = int(sample_rate * WIN)
vocal = load_data(root / track['vocal_path'],
sample_rate=sample_rate)
# Discard silence vocal target
if vocal.sum() == 0.0:
print(f"Track {track['perf_key']} is silence - discarded")
return None
# Get original duration to discard short vocal target
vocal_dur = librosa.get_duration(vocal, sr=sample_rate)
if vocal_dur < MIN_DURATION:
print(f"Track {track['perf_key']} too short ({vocal_dur} sec) - discarded")
return None
ori_mix = load_data(root / track['mix_path'],
sample_rate=sample_rate)
ori_mix_dur = librosa.get_duration(ori_mix, sr=sample_rate)
if ori_mix_dur < vocal_dur * DURATION_VAR:
print(f"Mixture {track['perf_key']} length ({ori_mix_dur}) is shorter than vocal length ({vocal_dur}) - discarded")
return None
# Get vocal shifting by doing several xcorr of small segments of vocal.
# The shifting time determine the start point of background and vocal.
vocal_shift = xcorr_searcher_max(ori_mix, vocal, sample_rate, frame_length, hop_length)
if vocal_shift <= 0:
vocal_start = abs(vocal_shift)
back_start = 0
else:
vocal_start = 0
back_start = vocal_shift
# Get new/real min duration.
back = load_data(root / track['background_path'],
sample_rate=sample_rate)
vocal = vocal[int(vocal_start * sample_rate):]
back = back[int(back_start * sample_rate):]
vocal_dur = librosa.get_duration(vocal, sr=sample_rate)
back_dur = librosa.get_duration(back, sr=sample_rate)
min_dur = min(vocal_dur, back_dur)
# Create mixture to calculate mean and std
mix = vocal[:int(min_dur * sample_rate)] + back[:int(min_dur * sample_rate)]
# Get amplitude for SNR=0
amplitude_scaler = get_amplitude_scaling_factor(vocal, back)
track_info = dict()
track_info['original_mix'] = track['mix_path']
track_info['original_mix_mean'] = \
f"{ori_mix[int(back_start * sample_rate):int(min_dur * sample_rate)].mean()}"
track_info['original_mix_std'] = \
f"{ori_mix[int(back_start * sample_rate):int(min_dur * sample_rate)].std()}"
track_info['mix_mean'] = f"{mix.mean()}"
track_info['mix_std'] = f"{mix.std()}"
track_info['duration'] = f"{min_dur}"
track_info['vocal'] = track['vocal_path']
track_info['vocal_start'] = f"{vocal_start}"
track_info['scaler'] = f"{amplitude_scaler:}"
track_info['background'] = track['background_path']
track_info['background_start'] = f"{back_start}"
return track['perf_key'], track_info
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
[
"utils.xcorr_searcher_max",
"utils.load_data",
"argparse.ArgumentParser",
"pandas.read_csv",
"utils.get_amplitude_scaling_factor",
"multiprocessing.cpu_count",
"pathlib.Path",
"librosa.get_duration"
] |
[((519, 544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (542, 544), False, 'import argparse\n'), ((1018, 1045), 'pathlib.Path', 'Path', (['args.output_meta_path'], {}), '(args.output_meta_path)\n', (1022, 1045), False, 'from pathlib import Path\n'), ((1063, 1101), 'pandas.read_csv', 'pd.read_csv', (['f"""split/{args.split}.csv"""'], {}), "(f'split/{args.split}.csv')\n", (1074, 1101), True, 'import pandas as pd\n'), ((1865, 1927), 'utils.load_data', 'load_data', (["(root / track['vocal_path'])"], {'sample_rate': 'sample_rate'}), "(root / track['vocal_path'], sample_rate=sample_rate)\n", (1874, 1927), False, 'from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data\n'), ((2203, 2246), 'librosa.get_duration', 'librosa.get_duration', (['vocal'], {'sr': 'sample_rate'}), '(vocal, sr=sample_rate)\n', (2223, 2246), False, 'import librosa\n'), ((2415, 2475), 'utils.load_data', 'load_data', (["(root / track['mix_path'])"], {'sample_rate': 'sample_rate'}), "(root / track['mix_path'], sample_rate=sample_rate)\n", (2424, 2475), False, 'from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data\n'), ((2527, 2572), 'librosa.get_duration', 'librosa.get_duration', (['ori_mix'], {'sr': 'sample_rate'}), '(ori_mix, sr=sample_rate)\n', (2547, 2572), False, 'import librosa\n'), ((2958, 3031), 'utils.xcorr_searcher_max', 'xcorr_searcher_max', (['ori_mix', 'vocal', 'sample_rate', 'frame_length', 'hop_length'], {}), '(ori_mix, vocal, sample_rate, frame_length, hop_length)\n', (2976, 3031), False, 'from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data\n'), ((3264, 3331), 'utils.load_data', 'load_data', (["(root / track['background_path'])"], {'sample_rate': 'sample_rate'}), "(root / track['background_path'], sample_rate=sample_rate)\n", (3273, 3331), False, 'from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data\n'), ((3486, 3529), 'librosa.get_duration', 'librosa.get_duration', (['vocal'], {'sr': 'sample_rate'}), '(vocal, sr=sample_rate)\n', (3506, 3529), False, 'import librosa\n'), ((3549, 3591), 'librosa.get_duration', 'librosa.get_duration', (['back'], {'sr': 'sample_rate'}), '(back, sr=sample_rate)\n', (3569, 3591), False, 'import librosa\n'), ((3834, 3875), 'utils.get_amplitude_scaling_factor', 'get_amplitude_scaling_factor', (['vocal', 'back'], {}), '(vocal, back)\n', (3862, 3875), False, 'from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data\n'), ((1147, 1158), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1156, 1158), False, 'from multiprocessing import Pool, cpu_count\n'), ((1184, 1204), 'pathlib.Path', 'Path', (['args.root_path'], {}), '(args.root_path)\n', (1188, 1204), False, 'from pathlib import Path\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import sys
from matplotlib import rcParams
from cycler import cycler
import itertools
if len(sys.argv) < 2:
print("Especifique la carpeta con resultados con la siguiente sintaxis:")
print("python %s carpeta_resultados" % sys.argv[0])
exit(1)
results_folder = sys.argv[1]
digit = r'\d*\.?\d+'
regex = r'^result_(%s)_(%s)_%s_\w+_%s_%s_%s_%s_\w+_%s_\.txt$' % (digit, digit, digit, digit, digit, digit, digit, digit)
"""
print(regex)
tomatch = 'result_1.1000_0.6000_50.0000_WallPeriodicBC_1_0.5000_1_0.0100_False_1024_.txt'
matches = re.match(regex, tomatch)
if matches:
print(matches.group(1))
print(matches.group(2))
else:
print("no match")
"""
files = os.listdir(results_folder)
time_lambda_curves = {}
for filename in files:
matches = re.match(regex, filename)
if not matches:
continue
the_lambda = float(matches.group(1))
the_eta = float(matches.group(2))
with open(results_folder + filename, 'r') as f:
first_line = f.readline()
the_time = float(first_line)
if the_eta not in time_lambda_curves:
time_lambda_curves[the_eta] = {
'times': [],
'lambdas': []
}
time_lambda_curves[the_eta]['times'].append(the_time)
time_lambda_curves[the_eta]['lambdas'].append(the_lambda)
marker = itertools.cycle(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))
lines = itertools.cycle((':', '-.', '--', '-'))
# Configuraciones de estilo de los graficos
plt.figure(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
plt.rc('lines', linewidth=1)
plt.rc('axes', prop_cycle=(cycler('color', ['blue', 'green', 'red',
'magenta', 'black',
'purple', 'pink', 'brown',
'orange', 'coral',
'lightblue', 'lime', 'lavender',
'turquoise', 'darkgreen', 'tan',
'salmon', 'gold',
'darkred', 'darkblue'])))
to_plot = []
for eta, values in time_lambda_curves.items():
to_plot.append((eta, values))
to_plot.sort()
#for eta, values in time_lambda_curves.items():
for eta, values in to_plot:
the_times = values['times']
the_lambdas = values['lambdas']
order = np.argsort(the_lambdas)
xs = np.array(the_lambdas)[order]
ys = np.array(the_times)[order]
plt.plot(xs, ys, label="$\eta = %.1f$" % eta, marker=next(marker), markersize=15, linewidth=3)
plt.xticks(np.arange(0.0, 1.4, 0.1))
plt.yticks(np.arange(0, 10001, 1000))
plt.xlabel('$\lambda$', fontsize=18)
plt.ylabel('Tiempo (s)', fontsize=18)
plt.title('Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos', fontsize=22, y=1.02)
#plot.legend(loc=2, prop={'size': 6})
plt.legend(prop={'size': 16})
plt.grid(alpha=0.5)
plt.show()
|
[
"matplotlib.pyplot.title",
"cycler.cycler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"re.match",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.arange",
"itertools.cycle",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"os.listdir"
] |
[((753, 779), 'os.listdir', 'os.listdir', (['results_folder'], {}), '(results_folder)\n', (763, 779), False, 'import os\n'), ((1396, 1458), 'itertools.cycle', 'itertools.cycle', (["('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.')"], {}), "(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))\n", (1411, 1458), False, 'import itertools\n'), ((1467, 1506), 'itertools.cycle', 'itertools.cycle', (["(':', '-.', '--', '-')"], {}), "((':', '-.', '--', '-'))\n", (1482, 1506), False, 'import itertools\n'), ((1552, 1618), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)', 'dpi': '(80)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')\n", (1562, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1647), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'linewidth': '(1)'}), "('lines', linewidth=1)\n", (1625, 1647), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3048), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 16}"}), "(prop={'size': 16})\n", (3029, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3068), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'alpha': '(0.5)'}), '(alpha=0.5)\n', (3057, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3079), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3077, 3079), True, 'import matplotlib.pyplot as plt\n'), ((843, 868), 're.match', 're.match', (['regex', 'filename'], {}), '(regex, filename)\n', (851, 868), False, 'import re\n'), ((2470, 2493), 'numpy.argsort', 'np.argsort', (['the_lambdas'], {}), '(the_lambdas)\n', (2480, 2493), True, 'import numpy as np\n'), ((2756, 2793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\lambda$"""'], {'fontsize': '(18)'}), "('$\\\\lambda$', fontsize=18)\n", (2766, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tiempo (s)"""'], {'fontsize': '(18)'}), "('Tiempo (s)', fontsize=18)\n", (2807, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2992), 'matplotlib.pyplot.title', 'plt.title', (['"""Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos"""'], {'fontsize': '(22)', 'y': '(1.02)'}), '(\n """Tiempo de ejecución del algoritmo de Listas de Verlet\n para un tiempo de simulación físico de 50 segundos"""\n , fontsize=22, y=1.02)\n', (2848, 2992), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1897), 'cycler.cycler', 'cycler', (['"""color"""', "['blue', 'green', 'red', 'magenta', 'black', 'purple', 'pink', 'brown',\n 'orange', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',\n 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']"], {}), "('color', ['blue', 'green', 'red', 'magenta', 'black', 'purple',\n 'pink', 'brown', 'orange', 'coral', 'lightblue', 'lime', 'lavender',\n 'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue'])\n", (1681, 1897), False, 'from cycler import cycler\n'), ((2504, 2525), 'numpy.array', 'np.array', (['the_lambdas'], {}), '(the_lambdas)\n', (2512, 2525), True, 'import numpy as np\n'), ((2542, 2561), 'numpy.array', 'np.array', (['the_times'], {}), '(the_times)\n', (2550, 2561), True, 'import numpy as np\n'), ((2684, 2708), 'numpy.arange', 'np.arange', (['(0.0)', '(1.4)', '(0.1)'], {}), '(0.0, 1.4, 0.1)\n', (2693, 2708), True, 'import numpy as np\n'), ((2725, 2750), 'numpy.arange', 'np.arange', (['(0)', '(10001)', '(1000)'], {}), '(0, 10001, 1000)\n', (2734, 2750), True, 'import numpy as np\n')]
|
import datetime
from django.conf import settings
from django.utils import dateformat
import pytz
from forum.models import ForumProfile
def user_timezone(dt, user):
"""
Converts the given datetime to the given User's timezone, if they
have one set in their forum profile.
Adapted from http://www.djangosnippets.org/snippets/183/
"""
tz = settings.TIME_ZONE
if user.is_authenticated():
profile = ForumProfile.objects.get_for_user(user)
if profile.timezone:
tz = profile.timezone
try:
result = dt.astimezone(pytz.timezone(tz))
except ValueError:
# The datetime was stored without timezone info, so use the
# timezone configured in settings.
result = dt.replace(tzinfo=pytz.timezone(settings.TIME_ZONE)) \
.astimezone(pytz.timezone(tz))
return result
def format_datetime(dt, user, date_format, time_format, separator=' '):
"""
Formats a datetime, using ``'Today'`` or ``'Yesterday'`` instead of
the given date format when appropriate.
If a User is given and they have a timezone set in their profile,
the datetime will be translated to their local time.
"""
if user:
dt = user_timezone(dt, user)
today = user_timezone(datetime.datetime.now(), user).date()
else:
today = datetime.date.today()
date_part = dt.date()
delta = date_part - today
if delta.days == 0:
date = u'Today'
elif delta.days == -1:
date = u'Yesterday'
else:
date = dateformat.format(dt, date_format)
return u'%s%s%s' % (date, separator,
dateformat.time_format(dt.time(), time_format))
|
[
"datetime.date.today",
"django.utils.dateformat.format",
"pytz.timezone",
"forum.models.ForumProfile.objects.get_for_user",
"datetime.datetime.now"
] |
[((451, 490), 'forum.models.ForumProfile.objects.get_for_user', 'ForumProfile.objects.get_for_user', (['user'], {}), '(user)\n', (484, 490), False, 'from forum.models import ForumProfile\n'), ((1389, 1410), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1408, 1410), False, 'import datetime\n'), ((598, 615), 'pytz.timezone', 'pytz.timezone', (['tz'], {}), '(tz)\n', (611, 615), False, 'import pytz\n'), ((1603, 1637), 'django.utils.dateformat.format', 'dateformat.format', (['dt', 'date_format'], {}), '(dt, date_format)\n', (1620, 1637), False, 'from django.utils import dateformat\n'), ((860, 877), 'pytz.timezone', 'pytz.timezone', (['tz'], {}), '(tz)\n', (873, 877), False, 'import pytz\n'), ((1323, 1346), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1344, 1346), False, 'import datetime\n'), ((790, 823), 'pytz.timezone', 'pytz.timezone', (['settings.TIME_ZONE'], {}), '(settings.TIME_ZONE)\n', (803, 823), False, 'import pytz\n')]
|
#!/usr/bin/env python
__author__ = '<NAME>'
import argparse
from RouToolPa.Tools.Samtools import SamtoolsV1
from RouToolPa.Tools.Bedtools import BamToFastq
from RouToolPa.GeneralRoutines import FileRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input bam file")
parser.add_argument("-t", "--threads", action="store", dest="threads", type=int, default=1,
help="Number of threads to use. Default - 1")
parser.add_argument("-p", "--prepare_bam", action="store_true", dest="prepare_bam",
help="Prepare bam for reads extraction(filter out supplementary and not primary alignments"
"and sort by name)")
"""
parser.add_argument("-e", "--prepared_bam", action="store", dest="prepared_bam",
help="File to write sorted bam file. Required if -p/--prepare_bam option is set")
"""
parser.add_argument("-e", "--prepared_bam_prefix", action="store", dest="prepared_bam_prefix",
help="Prefix of sorted bam file(s). Required if -p/--prepare_bam option is set")
parser.add_argument("-d", "--temp_dir", action="store", dest="temp_dir",
help="Directory to use for temporary files. Required if -p/--prepare_bam option is set")
parser.add_argument("-o", "--out_prefix", action="store", dest="out_prefix", required=True,
help="Prefix of output fastq files")
parser.add_argument("-s", "--single_ends", action="store_false", dest="paired", default=True,
help="Reads are SE")
parser.add_argument("-x", "--mix_ends", action="store_true", dest="mix_ends", default=False,
help="Reads are mix of PE and SE")
parser.add_argument("-m", "--max_memory_per_thread", action="store", dest="max_memory_per_thread", default="1G",
help="Maximum memory per thread. Default - 1G")
args = parser.parse_args()
if args.prepare_bam and ((not args.prepared_bam_prefix) or (not args.temp_dir)):
raise ValueError("Options -e/--prepared_bam_prefix and -m/--temp_dir must be set if -p/--prepare_bam option is used")
SamtoolsV1.threads = args.threads
if args.prepare_bam or args.mix_ends:
FileRoutines.safe_mkdir(FileRoutines.check_path(args.temp_dir))
prepared_pe_bam_file = "%s.bam" % args.prepared_bam_prefix
prepared_unpaired_bam_file = ("%s.unpaired.bam" % args.prepared_bam_prefix) if args.mix_ends else None
"""
SamtoolsV1.prepare_bam_for_read_extraction(args.input, args.prepared_bam, temp_file_prefix=args.temp_dir,
max_memory_per_thread=args.max_memory_per_thread)
"""
SamtoolsV1.prepare_bam_for_read_extraction(args.input, prepared_pe_bam_file, temp_file_prefix=args.temp_dir,
max_memory_per_thread=args.max_memory_per_thread,
bam_file_to_write_unpaired_reads=prepared_unpaired_bam_file)
if args.paired:
left_fastq = "%s_1.fastq" % args.out_prefix
right_fastq = "%s_2.fastq" % args.out_prefix
unpaired_fastq = "%s.unpaired.fastq" % args.out_prefix
else:
left_fastq = "%s.fastq" % args.out_prefix
right_fastq = None
if args.mix_ends:
BamToFastq.convert(prepared_unpaired_bam_file, unpaired_fastq, out_right_fastq=None)
#BamToFastq.convert(args.prepared_bam if args.prepare_bam else args.input, left_fastq, out_right_fastq=right_fastq)
BamToFastq.convert(prepared_pe_bam_file if args.prepare_bam else args.input, left_fastq, out_right_fastq=right_fastq)
|
[
"RouToolPa.Tools.Bedtools.BamToFastq.convert",
"RouToolPa.Tools.Samtools.SamtoolsV1.prepare_bam_for_read_extraction",
"RouToolPa.GeneralRoutines.FileRoutines.check_path",
"argparse.ArgumentParser"
] |
[((217, 242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (240, 242), False, 'import argparse\n'), ((3507, 3628), 'RouToolPa.Tools.Bedtools.BamToFastq.convert', 'BamToFastq.convert', (['(prepared_pe_bam_file if args.prepare_bam else args.input)', 'left_fastq'], {'out_right_fastq': 'right_fastq'}), '(prepared_pe_bam_file if args.prepare_bam else args.input,\n left_fastq, out_right_fastq=right_fastq)\n', (3525, 3628), False, 'from RouToolPa.Tools.Bedtools import BamToFastq\n'), ((2721, 2954), 'RouToolPa.Tools.Samtools.SamtoolsV1.prepare_bam_for_read_extraction', 'SamtoolsV1.prepare_bam_for_read_extraction', (['args.input', 'prepared_pe_bam_file'], {'temp_file_prefix': 'args.temp_dir', 'max_memory_per_thread': 'args.max_memory_per_thread', 'bam_file_to_write_unpaired_reads': 'prepared_unpaired_bam_file'}), '(args.input, prepared_pe_bam_file,\n temp_file_prefix=args.temp_dir, max_memory_per_thread=args.\n max_memory_per_thread, bam_file_to_write_unpaired_reads=\n prepared_unpaired_bam_file)\n', (2763, 2954), False, 'from RouToolPa.Tools.Samtools import SamtoolsV1\n'), ((3305, 3393), 'RouToolPa.Tools.Bedtools.BamToFastq.convert', 'BamToFastq.convert', (['prepared_unpaired_bam_file', 'unpaired_fastq'], {'out_right_fastq': 'None'}), '(prepared_unpaired_bam_file, unpaired_fastq,\n out_right_fastq=None)\n', (3323, 3393), False, 'from RouToolPa.Tools.Bedtools import BamToFastq\n'), ((2284, 2322), 'RouToolPa.GeneralRoutines.FileRoutines.check_path', 'FileRoutines.check_path', (['args.temp_dir'], {}), '(args.temp_dir)\n', (2307, 2322), False, 'from RouToolPa.GeneralRoutines import FileRoutines\n')]
|
# -*- coding: utf-8 -*-
import Lexico
import Arbol
import string
import sys
import os
class Sintactico():
def __init__(self):
with open('entrada.txt','r') as Archivo: self.Cadena = Archivo.read()+'$'
Archivo.close()
#===============================================================
self.Suma = Arbol.Suma
self.Multi = Arbol.Multi
self.Asign = Arbol.Asignacion
self.ReservIf = Arbol.ReservIf
self.ReservPrint = Arbol.ReservPrint
self.Separador = Arbol.Separador
self.Signo = Arbol.Signo
self.ExpresionArb = Arbol.Expre
self.Bloque = Arbol.Bloque
self.ReservElse = Arbol.ReservElse
self.ReservWhile = Arbol.ReservWhile
self.Logico = Arbol.Logico
self.Relacional = Arbol.Relacional
self.Identi = Arbol.Identificador
self.Entero = Arbol.Entero
self.Flotante = Arbol.Flotante
self.CadenaArb = Arbol.Cadena
#===============================================================
self.ListaArbolesBloque = [[],[],[],[],[]] # Permite Anidación de hasta 5 niveles.
self.ListaArboles = []
self.ArbolActual = []
self.ArbolPila = []
self.lexico = Lexico.Lexico(self.Cadena)
self.Cadena = ''
self.PalabReserv = ['if', 'else', 'do','while', 'print']
self.BloqueActivo = [False, False, False, False, False] # Permite Anidación de hasta 5 niveles.
def Resultado(self, Salida):
if Salida == 0:
print('\n\n\n\t Error Sintáctico: ', Salida)
for x in range(5):
self.lexico.sigSimbolo()
print(self.lexico.simbolo,end='')
Archivo = open('salida.txt','w')
Cadena = Archivo.write(str(Salida))
Archivo.close()
def error(self):
self.Resultado(0)
sys.exit()
def analiza(self):
self.lexico.sigSimbolo()
self.A()
self.Comprueba(20)
def Comprueba(self, Tipo):
if self.lexico.tipo == Tipo:
try: self.lexico.sigSimbolo()
except: self.Resultado(1)
else: self.error()
def A(self):
xD = True
if self.lexico.tipo == 2 and self.lexico.simbolo in self.PalabReserv:
while xD:
xD = False
if self.lexico.simbolo == 'if':
self.If()
xD = True
if self.lexico.simbolo == 'do':
self.DoWhile()
xD = True
if self.lexico.simbolo == 'while':
self.While()
xD = True
if self.lexico.simbolo == 'for':
self.For()
xD = True
if self.lexico.simbolo == 'print':
self.Print()
xD = True
self.Asignacion()
def Asignacion(self, Bool=True):
#===============================================================
Simbolo = None
#===============================================================
if self.lexico.tipo == 2:
#================================================================
R = self.Identi(None, self.lexico.simbolo)
#================================================================
self.lexico.sigSimbolo()
self.Comprueba(15)
#================================================================
P = self.Expresion()
P = self.Asign(R,P)
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(P)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(P)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(P)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(P)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(P)
else: self.ListaArboles.append(P)
#================================================================
if Bool:
self.Comprueba(12)
self.A()
def If(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.ComparacionLogica()
R = self.ReservIf()
R.SetHijo(P)
#===============================================================
self.Comprueba(22)
if self.lexico.tipo == 23:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
R.SetHijo(B)
#===============================================================
else:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
if self.lexico.simbolo == 'print': self.Print()
else:
self.Asignacion(False)
self.Comprueba(12);
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
R.SetHijo(B)
#===============================================================
if self.lexico.simbolo == 'else':
self.lexico.sigSimbolo()
if self.lexico.tipo == 23:
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
E = self.ReservElse()
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
E.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
E.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
E.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
E.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
E.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
#===============================================================
else:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
E = self.ReservElse()
#===============================================================
if self.lexico.simbolo == 'print': self.Print()
else:
self.Asignacion(False)
self.Comprueba(12);
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
E.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
E.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
E.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
E.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
E.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
#===============================================================
#===============================================================
R.SetHijo(E)
#===============================================================
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(R)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(R)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(R)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(R)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(R)
else: self.ListaArboles.append(R)
#===============================================================
def While(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.ComparacionLogica()
W = self.ReservWhile()
W.SetHijo(P)
#===============================================================
self.Comprueba(22)
if self.lexico.tipo == 23:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
W.SetHijo(B)
#===============================================================
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(W)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(W)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(W)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(W)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(W)
else: self.ListaArboles.append(W)
#===============================================================
def DoWhile(self):
self.lexico.sigSimbolo()
self.Comprueba(23)
self.A()
self.Comprueba(24)
if self.lexico.simbolo == 'while':
self.lexico.sigSimbolo()
self.Comprueba(11)
self.ComparacionLogica()
self.Comprueba(22)
self.Comprueba(12)
else: self.error()
def For(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
self.Asignacion(False)
self.Comprueba(12)
if (self.lexico.tipo == 2 or self.lexico.tipo == 3 or self.lexico.tipo == 5) and not self.lexico.tipo in self.PalabReserv:
self.lexico.sigSimbolo()
if self.lexico.tipo == 14:
self.lexico.sigSimbolo()
if (self.lexico.tipo == 2 or self.lexico.tipo == 3 or self.lexico.tipo == 5) and not self.lexico.tipo in self.PalabReserv: self.lexico.sigSimbolo()
self.Comprueba(12)
self.Asignacion(False)
self.Comprueba(22)
if self.lexico.tipo == 23:
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
def Expresion(self, Bool=True): # Permite Recursividad
#================================================================
P = None
Q = None
Tipo = None
xD = False
Sign = False
ArbolPila = []
#================================================================
if self.lexico.tipo == 9:
Sign = self.lexico.simbolo
self.lexico.sigSimbolo()
if self.lexico.tipo == 11:
self.lexico.sigSimbolo()
#================================================================
P = self.Expresion()
ArbolPila.append(P)
#================================================================
self.Comprueba(22)
xD = True
# 2 = IDENTIFICADOR; 3 = ENTERO; 5 = FLOTANTE; 8 = CADENA = "Hola xD"
if self.lexico.tipo == 2 or self.lexico.tipo == 3\
or self.lexico.tipo == 5 or self.lexico.tipo == 8\
or xD == True:
if xD == False:
#================================================================
if self.lexico.tipo == 2: P = self.Identi(None, self.lexico.simbolo)
elif self.lexico.tipo == 3: P = self.Entero('i', self.lexico.simbolo)
elif self.lexico.tipo == 5: P = self.Flotante('r', self.lexico.simbolo)
elif self.lexico.tipo == 8: P = self.CadenaArb('c', self.lexico.simbolo)
ArbolPila.append(P)
#================================================================
self.lexico.sigSimbolo()
else: xD = False
#================================================================
if Sign != False:
P = self.Signo(P, Sign)
ArbolPila.pop()
ArbolPila.append(P)
Sign = False
#================================================================
while self.lexico.tipo == 9 or self.lexico.tipo == 10:
#================================================================
Tipo = (self.lexico.tipo, self.lexico.simbolo)
ArbolPila.append(Tipo)
#================================================================
self.lexico.sigSimbolo()
if self.lexico.tipo == 9:
Sign = self.lexico.simbolo
self.lexico.sigSimbolo()
if self.lexico.tipo == 11:
self.lexico.sigSimbolo()
#================================================================
Q = self.Expresion()
ArbolPila.append(Q)
#================================================================
self.Comprueba(22)
xD = True
if self.lexico.tipo == 2 or self.lexico.tipo == 3\
or self.lexico.tipo == 5 or self.lexico.tipo == 8\
or xD == True:
if xD == False:
#================================================================
if self.lexico.tipo == 2: Q = self.Identi(None, self.lexico.simbolo)
elif self.lexico.tipo == 3: Q = self.Entero('i', self.lexico.simbolo)
elif self.lexico.tipo == 5: Q = self.Flotante('r', self.lexico.simbolo)
elif self.lexico.tipo == 8: Q = self.CadenaArb('c', self.lexico.simbolo)
ArbolPila.append(Q)
#================================================================
self.lexico.sigSimbolo()
else: xD = False
else: self.error()
#================================================================
if Sign != False:
Q = self.Signo(Q, Sign)
ArbolPila.pop()
ArbolPila.append(Q)
Sign = False
if Bool:
if Tipo[0] == 9: P = self.Suma(P, Q, Tipo[1])
elif Tipo[0] == 10: P = self.Multi(P, Q, Tipo[1])
#================================================================
if Bool == False:
# ~ print('\n')
ArbolPila = ArbolPila[::-1]
P = ArbolPila.pop(0)
# ~ print(P)
if ArbolPila != []:
Operador = ArbolPila.pop(0)
Valor1 = ArbolPila.pop(0)
# ~ print(Operador)
# ~ print(Valor1)
if Operador[0] == 9: P = self.Suma( Valor1, P, Operador[1])
elif Operador[0] == 10: P = self.Multi(Valor1, P, Operador[1])
Cont = 0
for x in ArbolPila:
# ~ print(x)
if Cont % 2 == 0: Operador = x
elif Cont % 2 == 1:
Valor1 = x
if Operador[0] == 9: P = self.Suma( Valor1, P, Operador[1])
elif Operador[0] == 10: P = self.Multi(Valor1, P, Operador[1])
Cont += 1
return P
def Print(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.Expresion()
P = self.ExpresionArb(P)
#===============================================================
self.Comprueba(22)
#===============================================================
P = self.ReservPrint(P)
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(P)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(P)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(P)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(P)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(P)
else: self.ListaArboles.append(P)
#===============================================================
self.Comprueba(12)
def ComparacionLogica(self):
#================================================================
P = self.ComparacionRelacional()
#================================================================
while self.lexico.tipo == 19:
self.lexico.sigSimbolo()
#================================================================
Q = self.ComparacionRelacional()
P = self.Logico(P, Q)
#================================================================
#================================================================
return P
#================================================================
def ComparacionRelacional(self):
#================================================================
P = None
Q = None
Simbolo = None
P = self.Expresion()
#================================================================
if self.lexico.tipo == 16:
Simbolo = self.lexico.simbolo
self.lexico.sigSimbolo()
Simbolo += self.lexico.simbolo
self.Comprueba(15)
#================================================================
Q = self.Expresion()
P = self.Relacional(P, Q, Simbolo)
#================================================================
elif self.lexico.tipo == 14:
Simbolo = self.lexico.simbolo
self.lexico.sigSimbolo()
#================================================================
Q = self.Expresion()
P = self.Relacional(P, Q, Simbolo)
#================================================================
#================================================================
return P
#================================================================
def P(self): os.system('Pause > Nul')
|
[
"Lexico.Lexico",
"os.system",
"sys.exit"
] |
[((1128, 1154), 'Lexico.Lexico', 'Lexico.Lexico', (['self.Cadena'], {}), '(self.Cadena)\n', (1141, 1154), False, 'import Lexico\n'), ((1668, 1678), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1676, 1678), False, 'import sys\n'), ((21406, 21430), 'os.system', 'os.system', (['"""Pause > Nul"""'], {}), "('Pause > Nul')\n", (21415, 21430), False, 'import os\n')]
|
from httpolice.citation import RFC
from httpolice.parse import (auto, empty, fill_names, literal, maybe_str,
octet_range, pivot, string, string1, string_times,
subst)
from httpolice.syntax.common import ALPHA, DIGIT, HEXDIG
pct_encoded = '%' + HEXDIG + HEXDIG > auto
sub_delims = (literal('!') | '$' | '&' | "'" | '(' | ')' | '*' | '+' |
',' | ';' | '=') > auto
unreserved = ALPHA | DIGIT | '-' | '.' | '_' | '~' > auto
pchar = unreserved | sub_delims | ':' | '@' | pct_encoded > auto
segment = string(pchar) > auto
segment_nz = string1(pchar) > auto
segment_nz_nc = string1(unreserved | sub_delims | '@' | pct_encoded) > auto
scheme = ALPHA + string(ALPHA | DIGIT | '+' | '-' | '.') > pivot
userinfo = string(unreserved | sub_delims | ':' | pct_encoded) > pivot
dec_octet = (DIGIT |
octet_range(0x31, 0x39) + DIGIT |
'1' + DIGIT + DIGIT |
'2' + octet_range(0x30, 0x34) + DIGIT |
'25' + octet_range(0x30, 0x35)) > auto
IPv4address = (dec_octet + '.' + dec_octet + '.' +
dec_octet + '.' + dec_octet) > pivot
h16 = string_times(1, 4, HEXDIG) > auto
ls32 = (h16 + ':' + h16) | IPv4address > auto
IPv6address = (
string_times(6, 6, h16 + ':') + ls32 |
'::' + string_times(5, 5, h16 + ':') + ls32 |
maybe_str(h16) + '::' + string_times(4, 4, h16 + ':') + ls32 |
maybe_str(string_times(0, 1, h16 + ':') + h16) +
'::' + string_times(3, 3, h16 + ':') + ls32 |
maybe_str(string_times(0, 2, h16 + ':') + h16) +
'::' + string_times(2, 2, h16 + ':') + ls32 |
maybe_str(string_times(0, 3, h16 + ':') + h16) + '::' + h16 + ':' + ls32 |
maybe_str(string_times(0, 4, h16 + ':') + h16) + '::' + ls32 |
maybe_str(string_times(0, 5, h16 + ':') + h16) + '::' + h16 |
maybe_str(string_times(0, 6, h16 + ':') + h16) + '::'
) > pivot
IPvFuture = ('v' + string1(HEXDIG) + '.' +
string1(unreserved | sub_delims | ':')) > pivot
# As updated by RFC 6874
ZoneID = string1(unreserved | pct_encoded) > pivot
IPv6addrz = IPv6address + '%25' + ZoneID > pivot
IP_literal = '[' + (IPv6address | IPv6addrz | IPvFuture) + ']' > pivot
reg_name = string(unreserved | sub_delims | pct_encoded) > pivot
host = IP_literal | IPv4address | reg_name > pivot
port = string(DIGIT) > pivot
authority = maybe_str(userinfo + '@') + host + maybe_str(':' + port) > pivot
path_abempty = string('/' + segment) > auto
path_absolute = '/' + maybe_str(segment_nz + string('/' + segment)) > auto
path_noscheme = segment_nz_nc + string('/' + segment) > auto
path_rootless = segment_nz + string('/' + segment) > auto
path_empty = subst(u'') << empty > auto
hier_part = ('//' + authority + path_abempty |
path_absolute | path_rootless | path_empty) > pivot
query = string(pchar | '/' | '?') > pivot
fragment = string(pchar | '/' | '?') > pivot
absolute_URI = scheme + ':' + hier_part + maybe_str('?' + query) > pivot
relative_part = ('//' + authority + path_abempty |
path_absolute | path_noscheme | path_empty) > pivot
URI = (scheme + ':' + hier_part +
maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot
relative_ref = (relative_part +
maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot
URI_reference = URI | relative_ref > pivot
fill_names(globals(), RFC(3986))
|
[
"httpolice.parse.literal",
"httpolice.parse.string1",
"httpolice.parse.maybe_str",
"httpolice.parse.subst",
"httpolice.parse.string_times",
"httpolice.parse.string",
"httpolice.parse.octet_range",
"httpolice.citation.RFC"
] |
[((682, 695), 'httpolice.parse.string', 'string', (['pchar'], {}), '(pchar)\n', (688, 695), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((764, 778), 'httpolice.parse.string1', 'string1', (['pchar'], {}), '(pchar)\n', (771, 778), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((846, 898), 'httpolice.parse.string1', 'string1', (["(unreserved | sub_delims | '@' | pct_encoded)"], {}), "(unreserved | sub_delims | '@' | pct_encoded)\n", (853, 898), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1001, 1052), 'httpolice.parse.string', 'string', (["(unreserved | sub_delims | ':' | pct_encoded)"], {}), "(unreserved | sub_delims | ':' | pct_encoded)\n", (1007, 1052), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1442, 1468), 'httpolice.parse.string_times', 'string_times', (['(1)', '(4)', 'HEXDIG'], {}), '(1, 4, HEXDIG)\n', (1454, 1468), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2493, 2526), 'httpolice.parse.string1', 'string1', (['(unreserved | pct_encoded)'], {}), '(unreserved | pct_encoded)\n', (2500, 2526), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2736, 2781), 'httpolice.parse.string', 'string', (['(unreserved | sub_delims | pct_encoded)'], {}), '(unreserved | sub_delims | pct_encoded)\n', (2742, 2781), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2892, 2905), 'httpolice.parse.string', 'string', (['DIGIT'], {}), '(DIGIT)\n', (2898, 2905), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3061, 3082), 'httpolice.parse.string', 'string', (["('/' + segment)"], {}), "('/' + segment)\n", (3067, 3082), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3578, 3603), 'httpolice.parse.string', 'string', (["(pchar | '/' | '?')"], {}), "(pchar | '/' | '?')\n", (3584, 3603), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3661, 3686), 'httpolice.parse.string', 'string', (["(pchar | '/' | '?')"], {}), "(pchar | '/' | '?')\n", (3667, 3686), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((4276, 4285), 'httpolice.citation.RFC', 'RFC', (['(3986)'], {}), '(3986)\n', (4279, 4285), False, 'from httpolice.citation import RFC\n'), ((927, 966), 'httpolice.parse.string', 'string', (["(ALPHA | DIGIT | '+' | '-' | '.')"], {}), "(ALPHA | DIGIT | '+' | '-' | '.')\n", (933, 966), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2391, 2429), 'httpolice.parse.string1', 'string1', (["(unreserved | sub_delims | ':')"], {}), "(unreserved | sub_delims | ':')\n", (2398, 2429), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3012, 3033), 'httpolice.parse.maybe_str', 'maybe_str', (["(':' + port)"], {}), "(':' + port)\n", (3021, 3033), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3236, 3257), 'httpolice.parse.string', 'string', (["('/' + segment)"], {}), "('/' + segment)\n", (3242, 3257), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3312, 3333), 'httpolice.parse.string', 'string', (["('/' + segment)"], {}), "('/' + segment)\n", (3318, 3333), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3375, 3385), 'httpolice.parse.subst', 'subst', (['u""""""'], {}), "(u'')\n", (3380, 3385), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3773, 3795), 'httpolice.parse.maybe_str', 'maybe_str', (["('?' + query)"], {}), "('?' + query)\n", (3782, 3795), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((4010, 4035), 'httpolice.parse.maybe_str', 'maybe_str', (["('#' + fragment)"], {}), "('#' + fragment)\n", (4019, 4035), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((4132, 4157), 'httpolice.parse.maybe_str', 'maybe_str', (["('#' + fragment)"], {}), "('#' + fragment)\n", (4141, 4157), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1246, 1265), 'httpolice.parse.octet_range', 'octet_range', (['(48)', '(53)'], {}), '(48, 53)\n', (1257, 1265), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2977, 3002), 'httpolice.parse.maybe_str', 'maybe_str', (["(userinfo + '@')"], {}), "(userinfo + '@')\n", (2986, 3002), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3985, 4007), 'httpolice.parse.maybe_str', 'maybe_str', (["('?' + query)"], {}), "('?' + query)\n", (3994, 4007), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((4107, 4129), 'httpolice.parse.maybe_str', 'maybe_str', (["('?' + query)"], {}), "('?' + query)\n", (4116, 4129), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2354, 2369), 'httpolice.parse.string1', 'string1', (['HEXDIG'], {}), '(HEXDIG)\n', (2361, 2369), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((3170, 3191), 'httpolice.parse.string', 'string', (["('/' + segment)"], {}), "('/' + segment)\n", (3176, 3191), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1192, 1211), 'httpolice.parse.octet_range', 'octet_range', (['(48)', '(52)'], {}), '(48, 52)\n', (1203, 1211), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2210, 2239), 'httpolice.parse.string_times', 'string_times', (['(0)', '(6)', "(h16 + ':')"], {}), "(0, 6, h16 + ':')\n", (2222, 2239), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1104, 1123), 'httpolice.parse.octet_range', 'octet_range', (['(49)', '(57)'], {}), '(49, 57)\n', (1115, 1123), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2144, 2173), 'httpolice.parse.string_times', 'string_times', (['(0)', '(5)', "(h16 + ':')"], {}), "(0, 5, h16 + ':')\n", (2156, 2173), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1945, 1974), 'httpolice.parse.string_times', 'string_times', (['(2)', '(2)', "(h16 + ':')"], {}), "(2, 2, h16 + ':')\n", (1957, 1974), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((2077, 2106), 'httpolice.parse.string_times', 'string_times', (['(0)', '(4)', "(h16 + ':')"], {}), "(0, 4, h16 + ':')\n", (2089, 2106), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1838, 1867), 'httpolice.parse.string_times', 'string_times', (['(3)', '(3)', "(h16 + ':')"], {}), "(3, 3, h16 + ':')\n", (1850, 1867), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1614, 1643), 'httpolice.parse.string_times', 'string_times', (['(6)', '(6)', "(h16 + ':')"], {}), "(6, 6, h16 + ':')\n", (1626, 1643), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1731, 1760), 'httpolice.parse.string_times', 'string_times', (['(4)', '(4)', "(h16 + ':')"], {}), "(4, 4, h16 + ':')\n", (1743, 1760), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((377, 389), 'httpolice.parse.literal', 'literal', (['"""!"""'], {}), "('!')\n", (384, 389), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1664, 1693), 'httpolice.parse.string_times', 'string_times', (['(5)', '(5)', "(h16 + ':')"], {}), "(5, 5, h16 + ':')\n", (1676, 1693), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1707, 1721), 'httpolice.parse.maybe_str', 'maybe_str', (['h16'], {}), '(h16)\n', (1716, 1721), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1891, 1920), 'httpolice.parse.string_times', 'string_times', (['(0)', '(2)', "(h16 + ':')"], {}), "(0, 2, h16 + ':')\n", (1903, 1920), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1998, 2027), 'httpolice.parse.string_times', 'string_times', (['(0)', '(3)', "(h16 + ':')"], {}), "(0, 3, h16 + ':')\n", (2010, 2027), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n'), ((1784, 1813), 'httpolice.parse.string_times', 'string_times', (['(0)', '(1)', "(h16 + ':')"], {}), "(0, 1, h16 + ':')\n", (1796, 1813), False, 'from httpolice.parse import auto, empty, fill_names, literal, maybe_str, octet_range, pivot, string, string1, string_times, subst\n')]
|
"""
pylinq setup script.
"""
from distutils.core import setup
with open("README.rst", 'r') as f:
readme = f.read()
with open("HISTORY.rst", 'r') as f:
history = f.read()
setup(
name='pinq',
version='0.1.1',
description='LINQ for python.',
long_description="%s\n\n%s" % (readme, history),
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dlshriver/pinq',
packages=[
'pinq',
],
classifiers=(
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
)
)
|
[
"distutils.core.setup"
] |
[((181, 938), 'distutils.core.setup', 'setup', ([], {'name': '"""pinq"""', 'version': '"""0.1.1"""', 'description': '"""LINQ for python."""', 'long_description': "('%s\\n\\n%s' % (readme, history))", 'license': '"""MIT"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/dlshriver/pinq"""', 'packages': "['pinq']", 'classifiers': "('Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5')"}), "(name='pinq', version='0.1.1', description='LINQ for python.',\n long_description='%s\\n\\n%s' % (readme, history), license='MIT', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/dlshriver/pinq', packages=['pinq'], classifiers=(\n 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'))\n", (186, 938), False, 'from distutils.core import setup\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
urls config
"""
from django.urls import include, path
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf import settings
prefix = settings.SITE_URL.lstrip('/')
# 公共URL配置
urlpatterns = [
path(prefix + 'admin/', admin.site.urls),
path(prefix + 'auth/', include('rest_framework.urls')),
path(prefix, include('home_application.urls')),
]
|
[
"django.conf.settings.SITE_URL.lstrip",
"django.urls.path",
"django.urls.include"
] |
[((873, 902), 'django.conf.settings.SITE_URL.lstrip', 'settings.SITE_URL.lstrip', (['"""/"""'], {}), "('/')\n", (897, 902), False, 'from django.conf import settings\n'), ((934, 974), 'django.urls.path', 'path', (["(prefix + 'admin/')", 'admin.site.urls'], {}), "(prefix + 'admin/', admin.site.urls)\n", (938, 974), False, 'from django.urls import include, path\n'), ((1003, 1033), 'django.urls.include', 'include', (['"""rest_framework.urls"""'], {}), "('rest_framework.urls')\n", (1010, 1033), False, 'from django.urls import include, path\n'), ((1053, 1085), 'django.urls.include', 'include', (['"""home_application.urls"""'], {}), "('home_application.urls')\n", (1060, 1085), False, 'from django.urls import include, path\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 08:52:50 2015
@author: sblanco
Modified by jcid to log messages to standard output
"""
import logging
import sys
class Log:
__logger__ = None
__error__ = False
def __init__(self, path, crear=False):
try:
self.__logger__ = logging.getLogger(__name__)
self.__logger__ .setLevel(logging.DEBUG)
# create a file handler
# mode w create new file:
if crear is True:
handler = logging.FileHandler(path, mode='w')
else:
handler = logging.FileHandler(path)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.__logger__.addHandler(handler)
# Add
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.__logger__.addHandler(ch)
except:
self.__error__ = True
def debug(self, msg):
if (self.__error__):
print('DEBUG: {}'.format(msg))
else:
self.__logger__.debug(msg)
def info(self, msg):
if (self.__error__):
print('INFO: {}'.format(msg))
else:
self.__logger__.info(msg)
def warn(self, msg):
if (self.__error__):
print('WARN: {}'.format(msg))
else:
self.__logger__.warn(msg)
def error(self, msg):
if (self.__error__):
print('ERROR: {}'.format(msg))
else:
self.__logger__.error(msg)
def critical(self, msg):
if (self.__error__):
print('CRITICAL: {}'.format(msg))
else:
self.__logger__.critical(msg)
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.FileHandler",
"logging.getLogger"
] |
[((340, 367), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import logging\n'), ((777, 839), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (794, 839), False, 'import logging\n'), ((1039, 1072), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1060, 1072), False, 'import logging\n'), ((1138, 1200), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (1155, 1200), False, 'import logging\n'), ((558, 593), 'logging.FileHandler', 'logging.FileHandler', (['path'], {'mode': '"""w"""'}), "(path, mode='w')\n", (577, 593), False, 'import logging\n'), ((640, 665), 'logging.FileHandler', 'logging.FileHandler', (['path'], {}), '(path)\n', (659, 665), False, 'import logging\n')]
|
import matplotlib.pyplot as plt
import pandas as pd
def visualize(peak_dict):
for i in range(len(peak_dict)):
df = pd.DataFrame(peak_dict['peak_%s' % i],
columns=['Position', 'Height', 'Width', 'Time'])
plt.subplot(3, 1, 1)
plt.plot(df['Time'], df['Height'])
plt.title('Peak %s Dynamics' % (i+1))
plt.ylabel('Intensity')
plt.subplot(3, 1, 2)
plt.plot(df['Time'], df['Position'])
plt.ylabel('Position')
plt.subplot(3, 1, 3)
plt.plot(df['Time'], df['Width'])
plt.ylabel('Width')
plt.xlabel('Time')
plt.show()
return
|
[
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((128, 219), 'pandas.DataFrame', 'pd.DataFrame', (["peak_dict['peak_%s' % i]"], {'columns': "['Position', 'Height', 'Width', 'Time']"}), "(peak_dict['peak_%s' % i], columns=['Position', 'Height',\n 'Width', 'Time'])\n", (140, 219), True, 'import pandas as pd\n'), ((242, 262), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (253, 262), True, 'import matplotlib.pyplot as plt\n'), ((271, 305), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Time']", "df['Height']"], {}), "(df['Time'], df['Height'])\n", (279, 305), True, 'import matplotlib.pyplot as plt\n'), ((314, 353), 'matplotlib.pyplot.title', 'plt.title', (["('Peak %s Dynamics' % (i + 1))"], {}), "('Peak %s Dynamics' % (i + 1))\n", (323, 353), True, 'import matplotlib.pyplot as plt\n'), ((360, 383), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (370, 383), True, 'import matplotlib.pyplot as plt\n'), ((393, 413), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (404, 413), True, 'import matplotlib.pyplot as plt\n'), ((422, 458), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Time']", "df['Position']"], {}), "(df['Time'], df['Position'])\n", (430, 458), True, 'import matplotlib.pyplot as plt\n'), ((467, 489), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Position"""'], {}), "('Position')\n", (477, 489), True, 'import matplotlib.pyplot as plt\n'), ((499, 519), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (510, 519), True, 'import matplotlib.pyplot as plt\n'), ((528, 561), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Time']", "df['Width']"], {}), "(df['Time'], df['Width'])\n", (536, 561), True, 'import matplotlib.pyplot as plt\n'), ((570, 589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Width"""'], {}), "('Width')\n", (580, 589), True, 'import matplotlib.pyplot as plt\n'), ((598, 616), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (608, 616), True, 'import matplotlib.pyplot as plt\n'), ((625, 635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (633, 635), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
import os
import os.path
import re
import string
import sys
import time
from glideinwms.frontend import glideinFrontendConfig, glideinFrontendDowntimeLib
def usage():
print("Usage:")
print(" manageFrontendDowntimes.py -dir frontend_dir -cmd [command] [options]")
print("where command is one of:")
print(" add - Add a scheduled downtime period")
print(" down - Put the factory down now(+delay)")
print(" up - Get the factory back up now(+delay)")
print(" check - Report if the factory is in downtime now(+delay)")
print("Other options:")
print(" -start [[[YYYY-]MM-]DD-]HH:MM[:SS] (start time for adding a downtime)")
print(" -end [[[YYYY-]MM-]DD-]HH:MM[:SS] (end time for adding a downtime)")
print(" -delay [HHh][MMm][SS[s]] (delay a downtime for down, up, and check cmds)")
# [[[YYYY-]MM-]DD-]HH:MM[:SS]
def strtxt2time(timeStr):
deftime = time.localtime(time.time())
year = deftime[0]
month = deftime[1]
day = deftime[2]
seconds = 0
darr = timeStr.split("-") # [[[YYYY-]MM-]DD-]HH:MM[:SS]
if len(darr) > 1: # we have at least part of the date
timeStr = darr[-1]
day = int(darr[-2])
if len(darr) > 2:
month = int(darr[-3])
if len(darr) > 3:
year = int(darr[-4])
tarr = timeStr.split(":")
hours = int(tarr[0])
minutes = int(tarr[1])
if len(tarr) > 2:
seconds = int(tarr[2])
outtime = time.mktime((year, month, day, hours, minutes, seconds, 0, 0, -1))
return outtime # this is epoch format
# [[[YYYY-]MM-]DD-]HH:MM[:SS]
# or
# unix_time
def str2time(timeStr):
if len(timeStr.split(":", 1)) > 1:
return strtxt2time(timeStr) # has a :, so it must be a text representation
else:
print(timeStr)
return int(timeStr) # should be a simple number
# [HHh][MMm][SS[s]]
def delay2time(delayStr):
hours = 0
minutes = 0
seconds = 0
# getting hours
harr = delayStr.split("h", 1)
if len(harr) == 2:
hours = int(harr[0])
delayStr = harr[1]
# getting minutes
marr = delayStr.split("m", 1)
if len(marr) == 2:
minutes = int(marr[0])
delayStr = marr[1]
# getting seconds
if delayStr[-1:] == "s":
delayStr = delayStr[:-1] # remove final s if present
if len(delayStr) > 0:
seconds = int(delayStr)
return seconds + 60 * (minutes + 60 * hours)
def get_downtime_fd(work_dir):
frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)
fd = glideinFrontendDowntimeLib.DowntimeFile(os.path.join(work_dir, frontendDescript.data["DowntimesFile"]))
return fd
# major commands
def add(opt_dict):
# glideinFrontendDowntimeLib.DowntimeFile( self.elementDescript.frontend_data['DowntimesFile'] )
down_fd = get_downtime_fd(opt_dict["dir"])
start_time = str2time(opt_dict["start"])
end_time = str2time(opt_dict["end"])
down_fd.addPeriod(start_time=start_time, end_time=end_time)
return 0
# this calls checkDowntime(with delayed_start_time ) first and then startDowntime(with delayed_start_time and end_time)
def down(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"])
if opt_dict["start"] == "None":
when += int(time.time())
else:
# delay applies only to the start time
when += str2time(opt_dict["start"])
if opt_dict["end"] == "None":
end_time = None
else:
end_time = str2time(opt_dict["end"])
if not down_fd.checkDowntime(check_time=when):
# only add a new line if not in downtime at that time
return down_fd.startDowntime(start_time=when, end_time=end_time)
else:
print("Frontend is already down. ")
return 0
# calls endDowntime( with end_time only )
def up(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"])
if opt_dict["end"] == "None":
when += int(time.time())
else:
# delay applies only to the end time
when += str2time(opt_dict["end"])
rtn = down_fd.endDowntime(end_time=when)
if rtn > 0:
return 0
else:
print("Frontend is not in downtime.")
return 1
def printtimes(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"]) + int(time.time())
down_fd.printDowntime(check_time=when)
def get_args(argv):
opt_dict = {"comment": "", "sec": "All", "delay": "0", "end": "None", "start": "None", "frontend": "All"}
index = 0
for arg in argv:
if len(argv) <= index + 1:
continue
if arg == "-cmd":
opt_dict["cmd"] = argv[index + 1]
if arg == "-dir":
opt_dict["dir"] = argv[index + 1]
if arg == "-start":
opt_dict["start"] = argv[index + 1]
if arg == "-end":
opt_dict["end"] = argv[index + 1]
if arg == "-delay":
opt_dict["delay"] = argv[index + 1]
index = index + 1
return opt_dict
def main(argv):
if len(argv) < 3:
usage()
return 1
# Get the command line arguments
opt_dict = get_args(argv)
try:
frontend_dir = opt_dict["dir"]
cmd = opt_dict["cmd"]
except KeyError as e:
usage()
print("-cmd -dir argument is required.")
return 1
try:
os.chdir(frontend_dir)
except OSError as e:
usage()
print("Failed to locate factory %s" % frontend_dir)
print("%s" % e)
return 1
if cmd == "add":
return add(opt_dict)
elif cmd == "down":
return down(opt_dict)
elif cmd == "up":
return up(opt_dict)
elif cmd == "check":
return printtimes(opt_dict)
else:
usage()
print("Invalid command %s" % cmd)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"time.time",
"glideinwms.frontend.glideinFrontendConfig.FrontendDescript",
"time.mktime",
"os.path.join",
"os.chdir"
] |
[((1640, 1706), 'time.mktime', 'time.mktime', (['(year, month, day, hours, minutes, seconds, 0, 0, -1)'], {}), '((year, month, day, hours, minutes, seconds, 0, 0, -1))\n', (1651, 1706), False, 'import time\n'), ((2679, 2727), 'glideinwms.frontend.glideinFrontendConfig.FrontendDescript', 'glideinFrontendConfig.FrontendDescript', (['work_dir'], {}), '(work_dir)\n', (2717, 2727), False, 'from glideinwms.frontend import glideinFrontendConfig, glideinFrontendDowntimeLib\n'), ((1091, 1102), 'time.time', 'time.time', ([], {}), '()\n', (1100, 1102), False, 'import time\n'), ((2777, 2839), 'os.path.join', 'os.path.join', (['work_dir', "frontendDescript.data['DowntimesFile']"], {}), "(work_dir, frontendDescript.data['DowntimesFile'])\n", (2789, 2839), False, 'import os\n'), ((5610, 5632), 'os.chdir', 'os.chdir', (['frontend_dir'], {}), '(frontend_dir)\n', (5618, 5632), False, 'import os\n'), ((3495, 3506), 'time.time', 'time.time', ([], {}), '()\n', (3504, 3506), False, 'import time\n'), ((4184, 4195), 'time.time', 'time.time', ([], {}), '()\n', (4193, 4195), False, 'import time\n'), ((4568, 4579), 'time.time', 'time.time', ([], {}), '()\n', (4577, 4579), False, 'import time\n')]
|
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from PIL import Image
# for testing purposes, remove this later!
from sys import exit
"""Data visualization on the Airbnb New York dataset from Kaggle.
The dataset provides 16 pieces of data in the following order:
0: id
1: name
2: host_id
3: host_name
4: neighbourhood_group
5: neighbourhood
6: latitude
7: longitude
8: room_type
9: price
10: minimum_nights
11: number_of_reviews
12: last_review
13: reviews_per_month
14: calculated_host_listings_count
15: availability_365
All fields are fairly self-explanatory. I will not be using the 'id' or the
'host_id' field since they are not relevant, and the 'name' field since it does
not make sense to in this context.
This project is fully open source and free to use and share. Enjoy!
"""
header = []
data = {}
num_columns = 16
num_entries = 0
with open('new_york_data.csv', encoding='utf-8') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
# read the header
header = next(reader)
# read the entries
body = []
for row in reader:
body.append(row)
num_entries = len(body)
# parse the entries into np arrays and store them under in the data list
for i in range(num_columns):
dtype = 'str'
# price, minimum nights, number of reviews
# calculated host listings count, annual availability
if i == 9 or i == 10 or i == 11 or i == 14 or i == 15:
dtype = 'int64'
# latitude, longitude, review per month
if i == 6 or i == 7 or i == 13:
dtype = 'float64'
# reviews per month is blank sometimes in the original dataset
if i == 13:
# numpy cannot process empty strings to floats; so check for this
col_data = np.asarray([body[j][i] if len(body[j][i]) > 0 else 0.0 for j in range(num_entries)], dtype=dtype)
else:
col_data = np.asarray([body[j][i] for j in range(num_entries)], dtype=dtype)
data[header[i]] = col_data
# Area that the cover maps; experimentally determined
# (latitude, longitude)
min_coords = (40.49279, -74.26442)
max_coords = (40.91906, -73.68299)
long_range = max_coords[1] - min_coords[1]
lat_range = max_coords[0] - min_coords[0]
image_extent = (min_coords[1], max_coords[1], min_coords[0], max_coords[0])
new_york_img = Image.open('new_york_map.png')
# use large figure sizes
matplotlib.rcParams['figure.figsize'] = (12, 7)
# Room Type Bar Graph
room_types, room_types_count = np.unique(data['room_type'], return_counts=True)
plt.title('Distribution of Room Types')
room_types_norm = room_types_count / sum(room_types_count)
plt.barh(room_types, room_types_norm)
ax = plt.gca()
ax.xaxis.set_major_formatter(tck.FuncFormatter(lambda x, _: '{:.0%}'.format(x)))
plt.show()
# Neighbourhood Groups
n_groups, n_groups_count = np.unique(data['neighbourhood_group'], return_counts=True)
n_groups_colors = ['#1a535c', '#4ecdc4', '#b2ff66', '#ff6b6b', '#ffe66d']
explode = np.zeros((len(n_groups),), dtype='float64')
for idx, group in enumerate(n_groups):
if group == 'Manhattan':
explode[idx] = 0.1
break
plt.title('Distribution of Neighbourhood Groups')
wedges, texts, _ = plt.pie(
n_groups_count,
labels=n_groups,
explode=explode,
autopct='%1.1f%%',
pctdistance=0.8,
colors=n_groups_colors)
plt.show()
# Neighbourhoods
nbhs, nbhs_count = np.unique(data['neighbourhood'], return_counts=True)
# zip the neighbourhood name and count into a tuple to sort by count
nbhs_sorted_tuples = sorted(list(zip(nbhs, nbhs_count)), key=lambda elem: elem[1], reverse=True)
# unzip the sorted tuples back into a list of names and a list of counts
nbhs_sorted, nbhs_sorted_count = list(zip(*nbhs_sorted_tuples))
# take only the top 20
nbhs_sorted = nbhs_sorted[:20]
nbhs_sorted_count = nbhs_sorted_count[:20]
nbhs_price_avgs = []
for nbh in nbhs_sorted:
prices = data['price'][data['neighbourhood'] == nbh]
nbhs_price_avgs.append(np.average(prices))
fig, ax1 = plt.subplots()
plt.title('Most Popular Neighbourhoods and Average Price')
# pad the bottom of the plot to prevent text clipping
plt.subplots_adjust(bottom=0.2)
# rotate the labels so that they are easier to read
ax1.set_xticklabels(nbhs_sorted, rotation=45, ha='right')
ax1.set_xlabel('Neighbourhood');
# plot number of places on the left y-axis
ax1.bar(nbhs_sorted, nbhs_sorted_count, width=-0.2, align='edge')
ax1.set_ylabel('Number of places (blue)')
# plot average price on the right y-axis
ax2 = ax1.twinx()
ax2.bar(nbhs_sorted, nbhs_price_avgs, width=0.2, align='edge', color='orange')
ax2.set_ylabel('Average price (orange)')
plt.show()
# Price Histogram
group_prices = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_prices.append(data['price'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
# use only prices less than 500 since most of the data belongs in this range
# this also lets us not worry about huge outliers (there are a few places whose
# nightly price is in the many thousands)
plt.hist(
group_prices,
histtype='barstacked',
bins=25,
range=(0, 500),
edgecolor='white',
color=n_groups_colors)
plt.legend(n_groups, loc='upper right')
plt.title('Distribution of Price per Night')
plt.xlim(0, 500)
plt.ylabel('Number of places')
plt.xlabel('Price range (USD)')
plt.show()
# Average Price Heatmap
# compute the average pricing over a grid of 150 by 150
price_heatmap_bins = 150
price_heatmap_sum = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
price_heatmap_count = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
for long, lat, price in zip(data['longitude'], data['latitude'], data['price']):
# take only prices below 500 to be consistent with price histogram
if price < 500:
idx_long = int((long - min_coords[1]) / long_range * price_heatmap_bins)
idx_lat = int((lat - min_coords[0]) / lat_range * price_heatmap_bins)
price_heatmap_sum[idx_lat, idx_long] += price
price_heatmap_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
price_heatmap_count = np.clip(price_heatmap_count, 1, None)
price_heatmap = price_heatmap_sum / price_heatmap_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(price_heatmap, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Price per Night Heatmap')
plt.show()
# Housing Scatter Plot
plt.imshow(new_york_img, extent=image_extent)
# divide locations based on groups and display them as a scatter on the New York map
for group, color in zip(n_groups, n_groups_colors):
plt.scatter(
data['longitude'][data['neighbourhood_group'] == group],
data['latitude'][data['neighbourhood_group'] == group],
s=2,
color=color)
plt.legend(n_groups, loc='upper left', markerscale=5)
plt.title('Plot of Housing Locations')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Housing Heatmap
plt.imshow(new_york_img, extent=image_extent)
plt.hist2d(data['longitude'], data['latitude'], bins=150, alpha=0.7)
plt.title('Heatmap of Housing Locations')
plt.colorbar()
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Minimum Nights Distribution
group_min_nights = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_min_nights.append(data['minimum_nights'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
plt.hist(
group_min_nights,
histtype='barstacked',
bins=20,
range=(1, 21),
edgecolor='white',
color=n_groups_colors)
plt.title('Minimum Number of Nights Required')
plt.legend(n_groups, loc='upper right')
plt.xlim(1, 21)
plt.xticks(np.arange(1, 21))
plt.xlabel('Minimum Nights')
plt.ylabel('Number of Places')
plt.show()
# Number of Reviews
# compute the average number of reviews over a grid of 150 by 150
num_reviews_bins = 150
num_reviews_sum = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
num_reviews_count = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
for long, lat, price in zip(data['longitude'], data['latitude'], data['number_of_reviews']):
idx_long = int((long - min_coords[1]) / long_range * num_reviews_bins)
idx_lat = int((lat - min_coords[0]) / lat_range * num_reviews_bins)
num_reviews_sum[idx_lat, idx_long] += price
num_reviews_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
num_reviews_count = np.clip(num_reviews_count, 1, None)
num_reviews = num_reviews_sum / num_reviews_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(num_reviews, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Number of Reviews Heatmap')
plt.show()
|
[
"matplotlib.pyplot.title",
"csv.reader",
"numpy.clip",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.unique",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.hist2d",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"PIL.Image.open",
"matplotlib.pyplot.xlabel"
] |
[((2392, 2422), 'PIL.Image.open', 'Image.open', (['"""new_york_map.png"""'], {}), "('new_york_map.png')\n", (2402, 2422), False, 'from PIL import Image\n'), ((2551, 2599), 'numpy.unique', 'np.unique', (["data['room_type']"], {'return_counts': '(True)'}), "(data['room_type'], return_counts=True)\n", (2560, 2599), True, 'import numpy as np\n'), ((2600, 2639), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Room Types"""'], {}), "('Distribution of Room Types')\n", (2609, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2736), 'matplotlib.pyplot.barh', 'plt.barh', (['room_types', 'room_types_norm'], {}), '(room_types, room_types_norm)\n', (2707, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2751), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2953), 'numpy.unique', 'np.unique', (["data['neighbourhood_group']"], {'return_counts': '(True)'}), "(data['neighbourhood_group'], return_counts=True)\n", (2904, 2953), True, 'import numpy as np\n'), ((3192, 3241), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Neighbourhood Groups"""'], {}), "('Distribution of Neighbourhood Groups')\n", (3201, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3382), 'matplotlib.pyplot.pie', 'plt.pie', (['n_groups_count'], {'labels': 'n_groups', 'explode': 'explode', 'autopct': '"""%1.1f%%"""', 'pctdistance': '(0.8)', 'colors': 'n_groups_colors'}), "(n_groups_count, labels=n_groups, explode=explode, autopct='%1.1f%%',\n pctdistance=0.8, colors=n_groups_colors)\n", (3268, 3382), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3412, 3414), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3504), 'numpy.unique', 'np.unique', (["data['neighbourhood']"], {'return_counts': '(True)'}), "(data['neighbourhood'], return_counts=True)\n", (3461, 3504), True, 'import numpy as np\n'), ((4065, 4079), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4077, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4138), 'matplotlib.pyplot.title', 'plt.title', (['"""Most Popular Neighbourhoods and Average Price"""'], {}), "('Most Popular Neighbourhoods and Average Price')\n", (4089, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4193, 4224), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (4212, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4698, 4708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4706, 4708), True, 'import matplotlib.pyplot as plt\n'), ((5165, 5281), 'matplotlib.pyplot.hist', 'plt.hist', (['group_prices'], {'histtype': '"""barstacked"""', 'bins': '(25)', 'range': '(0, 500)', 'edgecolor': '"""white"""', 'color': 'n_groups_colors'}), "(group_prices, histtype='barstacked', bins=25, range=(0, 500),\n edgecolor='white', color=n_groups_colors)\n", (5173, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5342), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper right"""'}), "(n_groups, loc='upper right')\n", (5313, 5342), True, 'import matplotlib.pyplot as plt\n'), ((5343, 5387), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Price per Night"""'], {}), "('Distribution of Price per Night')\n", (5352, 5387), True, 'import matplotlib.pyplot as plt\n'), ((5388, 5404), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(500)'], {}), '(0, 500)\n', (5396, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5405, 5435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of places"""'], {}), "('Number of places')\n", (5415, 5435), True, 'import matplotlib.pyplot as plt\n'), ((5436, 5467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Price range (USD)"""'], {}), "('Price range (USD)')\n", (5446, 5467), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5476, 5478), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5672), 'numpy.zeros', 'np.zeros', (['(price_heatmap_bins, price_heatmap_bins)'], {'dtype': '"""float64"""'}), "((price_heatmap_bins, price_heatmap_bins), dtype='float64')\n", (5613, 5672), True, 'import numpy as np\n'), ((5695, 5762), 'numpy.zeros', 'np.zeros', (['(price_heatmap_bins, price_heatmap_bins)'], {'dtype': '"""float64"""'}), "((price_heatmap_bins, price_heatmap_bins), dtype='float64')\n", (5703, 5762), True, 'import numpy as np\n'), ((6268, 6305), 'numpy.clip', 'np.clip', (['price_heatmap_count', '(1)', 'None'], {}), '(price_heatmap_count, 1, None)\n', (6275, 6305), True, 'import numpy as np\n'), ((6362, 6407), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (6372, 6407), True, 'import matplotlib.pyplot as plt\n'), ((6408, 6481), 'matplotlib.pyplot.imshow', 'plt.imshow', (['price_heatmap'], {'extent': 'image_extent', 'origin': '"""lower"""', 'alpha': '(0.9)'}), "(price_heatmap, extent=image_extent, origin='lower', alpha=0.9)\n", (6418, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6482, 6496), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6494, 6496), True, 'import matplotlib.pyplot as plt\n'), ((6497, 6541), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Price per Night Heatmap"""'], {}), "('Average Price per Night Heatmap')\n", (6506, 6541), True, 'import matplotlib.pyplot as plt\n'), ((6542, 6552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6550, 6552), True, 'import matplotlib.pyplot as plt\n'), ((6577, 6622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (6587, 6622), True, 'import matplotlib.pyplot as plt\n'), ((6940, 6993), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper left"""', 'markerscale': '(5)'}), "(n_groups, loc='upper left', markerscale=5)\n", (6950, 6993), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7032), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot of Housing Locations"""'], {}), "('Plot of Housing Locations')\n", (7003, 7032), True, 'import matplotlib.pyplot as plt\n'), ((7033, 7056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (7043, 7056), True, 'import matplotlib.pyplot as plt\n'), ((7057, 7079), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (7067, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7080, 7090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7088, 7090), True, 'import matplotlib.pyplot as plt\n'), ((7110, 7155), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (7120, 7155), True, 'import matplotlib.pyplot as plt\n'), ((7156, 7224), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (["data['longitude']", "data['latitude']"], {'bins': '(150)', 'alpha': '(0.7)'}), "(data['longitude'], data['latitude'], bins=150, alpha=0.7)\n", (7166, 7224), True, 'import matplotlib.pyplot as plt\n'), ((7225, 7266), 'matplotlib.pyplot.title', 'plt.title', (['"""Heatmap of Housing Locations"""'], {}), "('Heatmap of Housing Locations')\n", (7234, 7266), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7281), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7279, 7281), True, 'import matplotlib.pyplot as plt\n'), ((7282, 7305), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (7292, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (7316, 7328), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7337, 7339), True, 'import matplotlib.pyplot as plt\n'), ((7626, 7745), 'matplotlib.pyplot.hist', 'plt.hist', (['group_min_nights'], {'histtype': '"""barstacked"""', 'bins': '(20)', 'range': '(1, 21)', 'edgecolor': '"""white"""', 'color': 'n_groups_colors'}), "(group_min_nights, histtype='barstacked', bins=20, range=(1, 21),\n edgecolor='white', color=n_groups_colors)\n", (7634, 7745), True, 'import matplotlib.pyplot as plt\n'), ((7767, 7813), 'matplotlib.pyplot.title', 'plt.title', (['"""Minimum Number of Nights Required"""'], {}), "('Minimum Number of Nights Required')\n", (7776, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7814, 7853), 'matplotlib.pyplot.legend', 'plt.legend', (['n_groups'], {'loc': '"""upper right"""'}), "(n_groups, loc='upper right')\n", (7824, 7853), True, 'import matplotlib.pyplot as plt\n'), ((7854, 7869), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(21)'], {}), '(1, 21)\n', (7862, 7869), True, 'import matplotlib.pyplot as plt\n'), ((7899, 7927), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Minimum Nights"""'], {}), "('Minimum Nights')\n", (7909, 7927), True, 'import matplotlib.pyplot as plt\n'), ((7928, 7958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Places"""'], {}), "('Number of Places')\n", (7938, 7958), True, 'import matplotlib.pyplot as plt\n'), ((7959, 7969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7967, 7969), True, 'import matplotlib.pyplot as plt\n'), ((8098, 8161), 'numpy.zeros', 'np.zeros', (['(num_reviews_bins, num_reviews_bins)'], {'dtype': '"""float64"""'}), "((num_reviews_bins, num_reviews_bins), dtype='float64')\n", (8106, 8161), True, 'import numpy as np\n'), ((8182, 8245), 'numpy.zeros', 'np.zeros', (['(num_reviews_bins, num_reviews_bins)'], {'dtype': '"""float64"""'}), "((num_reviews_bins, num_reviews_bins), dtype='float64')\n", (8190, 8245), True, 'import numpy as np\n'), ((8646, 8681), 'numpy.clip', 'np.clip', (['num_reviews_count', '(1)', 'None'], {}), '(num_reviews_count, 1, None)\n', (8653, 8681), True, 'import numpy as np\n'), ((8732, 8777), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_york_img'], {'extent': 'image_extent'}), '(new_york_img, extent=image_extent)\n', (8742, 8777), True, 'import matplotlib.pyplot as plt\n'), ((8778, 8849), 'matplotlib.pyplot.imshow', 'plt.imshow', (['num_reviews'], {'extent': 'image_extent', 'origin': '"""lower"""', 'alpha': '(0.9)'}), "(num_reviews, extent=image_extent, origin='lower', alpha=0.9)\n", (8788, 8849), True, 'import matplotlib.pyplot as plt\n'), ((8850, 8864), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8862, 8864), True, 'import matplotlib.pyplot as plt\n'), ((8865, 8911), 'matplotlib.pyplot.title', 'plt.title', (['"""Average Number of Reviews Heatmap"""'], {}), "('Average Number of Reviews Heatmap')\n", (8874, 8911), True, 'import matplotlib.pyplot as plt\n'), ((8912, 8922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8920, 8922), True, 'import matplotlib.pyplot as plt\n'), ((988, 1023), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (998, 1023), False, 'import csv\n'), ((6764, 6911), 'matplotlib.pyplot.scatter', 'plt.scatter', (["data['longitude'][data['neighbourhood_group'] == group]", "data['latitude'][data['neighbourhood_group'] == group]"], {'s': '(2)', 'color': 'color'}), "(data['longitude'][data['neighbourhood_group'] == group], data[\n 'latitude'][data['neighbourhood_group'] == group], s=2, color=color)\n", (6775, 6911), True, 'import matplotlib.pyplot as plt\n'), ((7881, 7897), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (7890, 7897), True, 'import numpy as np\n'), ((4034, 4052), 'numpy.average', 'np.average', (['prices'], {}), '(prices)\n', (4044, 4052), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import cv2
import argparse
import time
import numpy as np
from training import Model
classes = []
FRAME_SIZE = 256
font = cv2.FONT_HERSHEY_SIMPLEX
switch = False
def detect(image):
crop_image = image[112:112 + FRAME_SIZE, 192:192 + FRAME_SIZE]
result = model.predict(crop_image)
index = np.argmax(result)
cv2.putText(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)
def crop_save(image):
crop_image = image[112 + 2:112 + FRAME_SIZE - 2, 192 + 2:192 + FRAME_SIZE - 2]
timestamp = str(time.time())
cv2.imwrite(
'C:\\Users\Akira.DESKTOP-HM7OVCC\Desktop\database\\' + timestamp + '.png',
crop_image,
(cv2.IMWRITE_PNG_COMPRESSION, 0)
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
help='folder contains model and labels'
)
args = parser.parse_args()
if args.model_dir:
model = Model()
try:
model.load(file_path=args.model_dir + '\model.h5')
with open(args.model_dir + '\labels.txt', 'r') as f:
for line in f.readlines():
classes.append(line.strip())
except OSError as e:
print("<--------------------Unable to open file-------------------->\n", e)
else:
cv2.namedWindow('Video')
# open le camera
capture = cv2.VideoCapture(0)
while capture.isOpened():
_, frame = capture.read()
cv2.rectangle(frame, (192, 112), (192 + FRAME_SIZE, 112 + FRAME_SIZE), (0, 255, 0), 2)
if switch:
detect(frame)
cv2.imshow('Video', frame)
key = cv2.waitKey(10)
if key == ord('z'):
switch = True
elif key == ord('d'):
switch = False
elif key == ord('s'):
crop_save(frame)
elif key == ord('q'): # exit
break
capture.release()
cv2.destroyWindow('Video')
else:
print('Input no found\nTry "python predict.py -h" for more information')
|
[
"cv2.putText",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.waitKey",
"cv2.imwrite",
"time.time",
"cv2.VideoCapture",
"training.Model",
"cv2.destroyWindow",
"cv2.rectangle",
"cv2.imshow",
"cv2.namedWindow"
] |
[((326, 343), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (335, 343), True, 'import numpy as np\n'), ((348, 419), 'cv2.putText', 'cv2.putText', (['image', 'classes[index]', '(192, 112)', 'font', '(1)', '(0, 255, 0)', '(2)'], {}), '(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)\n', (359, 419), False, 'import cv2\n'), ((564, 703), 'cv2.imwrite', 'cv2.imwrite', (["('C:\\\\Users\\\\Akira.DESKTOP-HM7OVCC\\\\Desktop\\\\database\\\\' + timestamp + '.png')", 'crop_image', '(cv2.IMWRITE_PNG_COMPRESSION, 0)'], {}), "('C:\\\\Users\\\\Akira.DESKTOP-HM7OVCC\\\\Desktop\\\\database\\\\' +\n timestamp + '.png', crop_image, (cv2.IMWRITE_PNG_COMPRESSION, 0))\n", (575, 703), False, 'import cv2\n'), ((769, 794), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (792, 794), False, 'import argparse\n'), ((547, 558), 'time.time', 'time.time', ([], {}), '()\n', (556, 558), False, 'import time\n'), ((986, 993), 'training.Model', 'Model', ([], {}), '()\n', (991, 993), False, 'from training import Model\n'), ((1370, 1394), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Video"""'], {}), "('Video')\n", (1385, 1394), False, 'import cv2\n'), ((1447, 1466), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1463, 1466), False, 'import cv2\n'), ((2127, 2153), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""Video"""'], {}), "('Video')\n", (2144, 2153), False, 'import cv2\n'), ((1565, 1656), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(192, 112)', '(192 + FRAME_SIZE, 112 + FRAME_SIZE)', '(0, 255, 0)', '(2)'], {}), '(frame, (192, 112), (192 + FRAME_SIZE, 112 + FRAME_SIZE), (0, \n 255, 0), 2)\n', (1578, 1656), False, 'import cv2\n'), ((1729, 1755), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (1739, 1755), False, 'import cv2\n'), ((1778, 1793), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1789, 1793), False, 'import cv2\n')]
|
import bpy
import threading, time
from bpy.props import IntProperty, FloatProperty, StringProperty, FloatVectorProperty, CollectionProperty, EnumProperty
from bpy.types import NodeTree, Node, NodeSocket
class MyCustomSocketBank(NodeSocket):
'''Custom node socket type for creating data input points for bank information.'''
bl_idname = 'CustomSocketTypeBank'
bl_label = "Bank Information"
def update_bank_socket(self, context):
'''This function updates the output of the current node.'''
self.node.update()
bank_country: bpy.props.BoolProperty(name="Bank Country", update=update_bank_socket)
bank_items = (
('BBAN', "BBAN", "Basic Bank Account Number"),
('IBAN', "IBAN", "International Bank Account Number"),
)
bank_type: bpy.props.EnumProperty(
name="Account Type",
description="Choose the account information required",
items=bank_items,
default='BBAN',
update=update_bank_socket
)
def draw(self, context, layout, node, text):
'''This function creates the labels for the socket panels within the node.'''
if self.is_output or self.is_linked:
layout.label(text=text)
else:
layout.label(text="Bank")
layout.prop(self, "bank_country", text="Country")
layout.prop(self, "bank_type", text="Account Type")
def draw_color(self, context, node):
'''This function determines the colour of the input and output points within the socket.'''
return (1.0, 0.4, 0.216, 0.5)
|
[
"bpy.props.BoolProperty",
"bpy.props.EnumProperty"
] |
[((561, 631), 'bpy.props.BoolProperty', 'bpy.props.BoolProperty', ([], {'name': '"""Bank Country"""', 'update': 'update_bank_socket'}), "(name='Bank Country', update=update_bank_socket)\n", (583, 631), False, 'import bpy\n'), ((792, 961), 'bpy.props.EnumProperty', 'bpy.props.EnumProperty', ([], {'name': '"""Account Type"""', 'description': '"""Choose the account information required"""', 'items': 'bank_items', 'default': '"""BBAN"""', 'update': 'update_bank_socket'}), "(name='Account Type', description=\n 'Choose the account information required', items=bank_items, default=\n 'BBAN', update=update_bank_socket)\n", (814, 961), False, 'import bpy\n')]
|
# Copyright (c) 2018 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import itertools
import heapq
from typing import List, Any, Union
class PriorityQueue(object):
REMOVED = '<removed-element>'
EXISTS_LOWER_PRIORITY = 1
EXISTS_UPDATED = 2
NONEXIST = 0
def __init__(self, ) -> None:
self.memory: List[Any] = []
self.counter = itertools.count()
self.size = 0
self.map = {}
def push(self, element: Any, priority: Union[float, int]=0) -> int:
return_value = PriorityQueue.NONEXIST
if element in self.map:
if self.map[element][0] < priority:
return PriorityQueue.EXISTS_LOWER_PRIORITY
self.remove_element(element)
return_value = PriorityQueue.EXISTS_UPDATED
else:
self.size += 1
count = next(self.counter)
entry = [priority, count, element]
self.map[element] = entry
heapq.heappush(self.memory, entry)
return return_value
def remove_element(self, element) -> None:
entry = self.map.pop(element)
entry[-1] = PriorityQueue.REMOVED
def pop(self, ) -> Any:
while self.memory:
priority, _, element = heapq.heappop(self.memory)
if element is not PriorityQueue.REMOVED:
del self.map[element]
self.size -= 1
return (priority, element)
raise KeyError("Tried to pop from an empty queue")
def empty(self, ) -> bool:
return self.size <= 0
|
[
"heapq.heappush",
"itertools.count",
"heapq.heappop"
] |
[((418, 435), 'itertools.count', 'itertools.count', ([], {}), '()\n', (433, 435), False, 'import itertools\n'), ((996, 1030), 'heapq.heappush', 'heapq.heappush', (['self.memory', 'entry'], {}), '(self.memory, entry)\n', (1010, 1030), False, 'import heapq\n'), ((1282, 1308), 'heapq.heappop', 'heapq.heappop', (['self.memory'], {}), '(self.memory)\n', (1295, 1308), False, 'import heapq\n')]
|
from django.contrib import admin
from biblioteca.models import Autor
from biblioteca.models import Libro
from biblioteca.models import Ejemplar
from biblioteca.models import Usuario
class LibroInline(admin.TabularInline):
model = Libro
class LibroAdmin(admin.ModelAdmin):
list_display = ('Titulo','Editorial','Autor')
list_display_links = ('Titulo','Editorial')
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('Nombre','Telefono')
fieldsets =(
('Datos',{
'fields': ('Nombre',)
}),
('Contacto',{
'fields': ('Telefono','Direccion')
})
)
class EjemplarAdmin(admin.ModelAdmin):
list_display = ('NombreLibro', 'NombreEditorial')
list_filter = ('Libro',)
class AutorAdmin(admin.ModelAdmin):
list_display = ('Codigo','Nombre')
inlines = [LibroInline]
search_fields = ['Nombre',]
admin.site.register(Autor,AutorAdmin)
admin.site.register(Libro,LibroAdmin)
admin.site.register(Ejemplar,EjemplarAdmin)
admin.site.register(Usuario,UsuarioAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((861, 899), 'django.contrib.admin.site.register', 'admin.site.register', (['Autor', 'AutorAdmin'], {}), '(Autor, AutorAdmin)\n', (880, 899), False, 'from django.contrib import admin\n'), ((899, 937), 'django.contrib.admin.site.register', 'admin.site.register', (['Libro', 'LibroAdmin'], {}), '(Libro, LibroAdmin)\n', (918, 937), False, 'from django.contrib import admin\n'), ((937, 981), 'django.contrib.admin.site.register', 'admin.site.register', (['Ejemplar', 'EjemplarAdmin'], {}), '(Ejemplar, EjemplarAdmin)\n', (956, 981), False, 'from django.contrib import admin\n'), ((981, 1023), 'django.contrib.admin.site.register', 'admin.site.register', (['Usuario', 'UsuarioAdmin'], {}), '(Usuario, UsuarioAdmin)\n', (1000, 1023), False, 'from django.contrib import admin\n')]
|
"""
This module defines all functionality that is common to CLI programs.
"""
import sys
import act.client.proxymgr as proxymgr
from act.client.errors import NoSuchProxyError
from act.client.errors import NoProxyFileError
def getProxyIdFromProxy(proxyPath):
"""
Returns ID of proxy at the given path.
Args:
proxyPath: A string with path to the proxy.
Raises:
NoSuchProxyError: Proxy with DN and attributes of the proxy given
in proxy path is not in the database.
NoProxyFileError: No proxy on given path.
"""
manager = proxymgr.ProxyManager()
try:
return manager.getProxyIdForProxyFile(proxyPath)
except NoSuchProxyError as e:
print("error: no proxy for DN=\"{}\" and attributes=\"{}\" "\
"found in database; use actproxy".format(e.dn, e.attribute))
sys.exit(1)
except NoProxyFileError as e:
print("error: path \"{}\" is not a proxy file; use arcproxy".format(e.path))
sys.exit(2)
def showHelpOnCommandOnly(argparser):
"""Show help if command is called without parameters."""
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(0)
|
[
"act.client.proxymgr.ProxyManager",
"sys.exit"
] |
[((585, 608), 'act.client.proxymgr.ProxyManager', 'proxymgr.ProxyManager', ([], {}), '()\n', (606, 608), True, 'import act.client.proxymgr as proxymgr\n'), ((1183, 1194), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1191, 1194), False, 'import sys\n'), ((864, 875), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (872, 875), False, 'import sys\n'), ((1004, 1015), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1012, 1015), False, 'import sys\n')]
|
import sys,os
os.environ['YHYDRA_CONFIG'] = sys.argv[1]
import setup_device
from load_config import CONFIG
import glob
RAWs = glob.glob(CONFIG['RAWs'])
FASTA = glob.glob(CONFIG['FASTA'])[0]
from fasta2db import digest_fasta
f = digest_fasta(FASTA,REVERSE_DECOY=False)
r = digest_fasta(FASTA,REVERSE_DECOY=True)
from sanitize_db import sanitize_db
s = sanitize_db()
from embed_db import embed_db
e = embed_db(REVERSE_DECOY=False)
e = embed_db(REVERSE_DECOY=True)
from pyThermoRawFileParser import parse_rawfiles
raw = parse_rawfiles(RAWs)
from search import search
s = [search(RAW.replace('.raw','.mgf')) for RAW in RAWs]
#s = [search(RAW) for RAW in RAWs]
from search_score import search_score
search_score()
from fdr_filter import fdr_filter
fdr_filter()
|
[
"fasta2db.digest_fasta",
"search_score.search_score",
"fdr_filter.fdr_filter",
"embed_db.embed_db",
"sanitize_db.sanitize_db",
"pyThermoRawFileParser.parse_rawfiles",
"glob.glob"
] |
[((130, 155), 'glob.glob', 'glob.glob', (["CONFIG['RAWs']"], {}), "(CONFIG['RAWs'])\n", (139, 155), False, 'import glob\n'), ((234, 274), 'fasta2db.digest_fasta', 'digest_fasta', (['FASTA'], {'REVERSE_DECOY': '(False)'}), '(FASTA, REVERSE_DECOY=False)\n', (246, 274), False, 'from fasta2db import digest_fasta\n'), ((278, 317), 'fasta2db.digest_fasta', 'digest_fasta', (['FASTA'], {'REVERSE_DECOY': '(True)'}), '(FASTA, REVERSE_DECOY=True)\n', (290, 317), False, 'from fasta2db import digest_fasta\n'), ((358, 371), 'sanitize_db.sanitize_db', 'sanitize_db', ([], {}), '()\n', (369, 371), False, 'from sanitize_db import sanitize_db\n'), ((407, 436), 'embed_db.embed_db', 'embed_db', ([], {'REVERSE_DECOY': '(False)'}), '(REVERSE_DECOY=False)\n', (415, 436), False, 'from embed_db import embed_db\n'), ((441, 469), 'embed_db.embed_db', 'embed_db', ([], {'REVERSE_DECOY': '(True)'}), '(REVERSE_DECOY=True)\n', (449, 469), False, 'from embed_db import embed_db\n'), ((526, 546), 'pyThermoRawFileParser.parse_rawfiles', 'parse_rawfiles', (['RAWs'], {}), '(RAWs)\n', (540, 546), False, 'from pyThermoRawFileParser import parse_rawfiles\n'), ((705, 719), 'search_score.search_score', 'search_score', ([], {}), '()\n', (717, 719), False, 'from search_score import search_score\n'), ((755, 767), 'fdr_filter.fdr_filter', 'fdr_filter', ([], {}), '()\n', (765, 767), False, 'from fdr_filter import fdr_filter\n'), ((164, 190), 'glob.glob', 'glob.glob', (["CONFIG['FASTA']"], {}), "(CONFIG['FASTA'])\n", (173, 190), False, 'import glob\n')]
|
"""
Возьмите тесты из шага — https://stepik.org/lesson/138920/step/11?unit=196194
Создайте новый файл
Создайте в нем класс с тестами, который должен наследоваться от unittest.TestCase по аналогии с предыдущим шагом
Перепишите в стиле unittest тест для страницы http://suninjuly.github.io/registration1.html
Перепишите в стиле unittest второй тест для страницы http://suninjuly.github.io/registration2.html
Оформите финальные проверки в тестах в стиле unittest, например, используя проверочный метод assertEqual
Запустите получившиеся тесты из файла
Просмотрите отчёт о запуске и найдите последнюю строчку
Отправьте эту строчку в качестве ответа на это задание
"""
import time
import unittest
from selenium import webdriver
LINK_TO_REG_FORM_V1 = 'http://suninjuly.github.io/registration1.html'
LINK_TO_REG_FORM_V2 = 'http://suninjuly.github.io/registration2.html'
REG_DATA = {
'first_name': 'John',
'last_name': 'Doe',
'email': '<EMAIL>',
}
driver = webdriver.Chrome()
class TestABC(unittest.TestCase):
def check_form(self, link_to_form):
try:
driver.get(link_to_form)
required_elements = [
driver.find_element_by_xpath('//*[.="First name*"]/following-sibling::input'),
driver.find_element_by_xpath('//*[.="Last name*"]/following-sibling::input'),
driver.find_element_by_xpath('//*[.="Email*"]/following-sibling::input')
]
for element, value in zip(required_elements, REG_DATA.values()):
element.send_keys(value)
driver.find_element_by_css_selector("button.btn").click()
time.sleep(1)
self.assertEqual(
'Congratulations! You have successfully registered!',
driver.find_element_by_tag_name("h1").text
)
finally:
driver.quit()
def test_reg_form_v1(self):
self.check_form(LINK_TO_REG_FORM_V1)
def test_reg_form_v2(self):
self.check_form(LINK_TO_REG_FORM_V2)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"selenium.webdriver.Chrome",
"time.sleep"
] |
[((964, 982), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (980, 982), False, 'from selenium import webdriver\n'), ((2056, 2071), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2069, 2071), False, 'import unittest\n'), ((1637, 1650), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1647, 1650), False, 'import time\n')]
|
import json
from functools import reduce
from base64 import b64decode
from typing import Union
import requests
def generate_device_info() -> dict:
return {
"device_id": device.deviceGenerator(),
"user_agent": "Dalvik/2.1.0 (Linux; U; Android 7.1.2; SM-G965N Build/star2ltexx-user 7.1.; com.narvii.amino.master/3.4.33592)"
}
def signature(data: Union[str, dict]) -> str:
if isinstance(data, dict): data = json.dumps(data)
return requests.get(f"http://forevercynical.com/generate/signature?data={str(data)}").json()['signature']
def decode_sid(sid: str) -> dict:
return json.loads(b64decode(reduce(lambda a, e: a.replace(*e), ("-+", "_/"), sid + "=" * (-len(sid) % 4)).encode())[1:-20].decode())
def sid_to_uid(SID: str) -> str: return decode_sid(SID)["2"]
def sid_to_ip_address(SID: str) -> str: return decode_sid(SID)["4"]
|
[
"json.dumps"
] |
[((447, 463), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (457, 463), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-09 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0011_auto_20180327_1341'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='sector_button_text',
field=models.TextField(default='Search more industries', max_length=255),
),
]
|
[
"django.db.models.TextField"
] |
[((411, 477), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Search more industries"""', 'max_length': '(255)'}), "(default='Search more industries', max_length=255)\n", (427, 477), False, 'from django.db import migrations, models\n')]
|
from django.db.models import Q
from django.urls import reverse
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import Voluntario, AsignacionVoluntario, DatoDeContacto
from .forms import VoluntarioForm, DatoDeContactoModelForm
from django_admin_row_actions import AdminRowActionsMixin
from django.contrib.admin.filters import DateFieldListFilter
class FechaIsNull(DateFieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
super().__init__(field, request, params, model, model_admin, field_path)
self.links = self.links[-2:]
class ContactoAdminInline(GenericTabularInline):
model = DatoDeContacto
form = DatoDeContactoModelForm
class AsignadoFilter(admin.SimpleListFilter):
title = 'Asignación'
parameter_name = 'asignado'
def lookups(self, request, model_admin):
return (
('sí', 'sí'),
('no', 'no'),
)
def queryset(self, request, queryset):
value = self.value()
if value:
isnull = value == 'no'
general = Q(
tipo='general',
asignacion_escuela__isnull=isnull,
asignacion_escuela__eleccion__slug='generales2017'
)
de_mesa = Q(
tipo='de_mesa',
asignacion_mesa__isnull=isnull,
asignacion_mesa__mesa__eleccion__slug='generales2017'
)
queryset = queryset.filter(general | de_mesa)
return queryset
class ReferenteFilter(admin.SimpleListFilter):
title = 'Referente'
parameter_name = 'referente'
def lookups(self, request, model_admin):
return (
('sí', 'sí'),
('no', 'no'),
)
def queryset(self, request, queryset):
value = self.value()
if value:
isnull = value == 'no'
queryset = queryset.filter(es_referente_de_circuito__isnull=isnull).distinct()
return queryset
class VoluntarioAdmin(AdminRowActionsMixin, admin.ModelAdmin):
def get_row_actions(self, obj):
row_actions = []
if obj.user:
row_actions.append(
{
'label': f'Loguearse como {obj.nombre}',
'url': f'/hijack/{obj.user.id}/',
'enabled': True,
}
)
row_actions += super().get_row_actions(obj)
return row_actions
def telefonos(o):
return ' / '.join(o.telefonos)
form = VoluntarioForm
list_display = ('__str__', 'dni', telefonos)
search_fields = (
'apellido', 'nombre', 'dni',
'asignacion_escuela__lugar_votacion__nombre',
'asignacion_mesa__mesa__lugar_votacion__nombre'
)
list_display_links = ('__str__',)
list_filter = ('estado', 'email_confirmado', AsignadoFilter)
# readonly_fields = ('mesas_desde_hasta',)
inlines = [
ContactoAdminInline,
]
class AsignacionVoluntarioAdmin(AdminRowActionsMixin, admin.ModelAdmin):
list_filter = ('mesa__eleccion', 'mesa__lugar_votacion__circuito')
raw_id_fields = ("mesa", "voluntario")
search_fields = (
'voluntario__apellido', 'voluntario__nombre', 'voluntario__dni',
'mesa__numero',
'mesa__lugar_votacion__nombre',
'mesa__lugar_votacion__direccion',
'mesa__lugar_votacion__barrio',
'mesa__lugar_votacion__ciudad',
)
admin.site.register(AsignacionVoluntario, AsignacionVoluntarioAdmin)
admin.site.register(Voluntario, VoluntarioAdmin)
|
[
"django.contrib.admin.site.register",
"django.db.models.Q"
] |
[((3509, 3577), 'django.contrib.admin.site.register', 'admin.site.register', (['AsignacionVoluntario', 'AsignacionVoluntarioAdmin'], {}), '(AsignacionVoluntario, AsignacionVoluntarioAdmin)\n', (3528, 3577), False, 'from django.contrib import admin\n'), ((3578, 3626), 'django.contrib.admin.site.register', 'admin.site.register', (['Voluntario', 'VoluntarioAdmin'], {}), '(Voluntario, VoluntarioAdmin)\n', (3597, 3626), False, 'from django.contrib import admin\n'), ((1141, 1249), 'django.db.models.Q', 'Q', ([], {'tipo': '"""general"""', 'asignacion_escuela__isnull': 'isnull', 'asignacion_escuela__eleccion__slug': '"""generales2017"""'}), "(tipo='general', asignacion_escuela__isnull=isnull,\n asignacion_escuela__eleccion__slug='generales2017')\n", (1142, 1249), False, 'from django.db.models import Q\n'), ((1330, 1438), 'django.db.models.Q', 'Q', ([], {'tipo': '"""de_mesa"""', 'asignacion_mesa__isnull': 'isnull', 'asignacion_mesa__mesa__eleccion__slug': '"""generales2017"""'}), "(tipo='de_mesa', asignacion_mesa__isnull=isnull,\n asignacion_mesa__mesa__eleccion__slug='generales2017')\n", (1331, 1438), False, 'from django.db.models import Q\n')]
|
# -*- coding: utf8 -*-
import arcpy
import os
import setting
class ToolValidator(object):
"""Class for validating a tool's parameter values and controlling
the behavior of the tool's dialog."""
def __init__(self):
"""Setup arcpy and the list of tool parameters."""
self.params = arcpy.GetParameterInfo()
self.current_path = setting.env[0]
self.sdefile = os.path.join(self.current_path,"vector.sde")
self.boundary = os.path.join(self.sdefile, 'SDE.Boundary')
self.province = os.path.join(self.boundary,"SDE.全国省界")
self.city = os.path.join(self.boundary,"SDE.全国市界")
self.country = os.path.join(self.boundary,"SDE.全国区县界")
self.project = os.path.join(self.sdefile, 'SDE.PROJECT')
self.fields = ['NAME',"ADMINCODE",'SHAPE@']
self.prj_fields = ['PRODUCT_TY','LOCATION','PRJ_ID','PRO_YEAR','RESOLUTION','PRJ_NAME','SHAPE@']
def initializeParameters(self):
"""Refine the properties of a tool's parameters. This method is
called when the tool is opened."""
cur = arcpy.da.SearchCursor(self.province, self.fields)
self.province_list = []
for row in cur:
self.province_name = row[0]+"-"+row[1]
self.province_list.append(self.province_name)
self.params[0].filter.list = self.province_list
cur = arcpy.da.SearchCursor(self.city, self.fields)
self.city_list = []
for row in cur:
self.city_name = row[0] + "-" + row[1]
self.city_list.append(self.city_name)
self.params[1].filter.list = self.city_list
cur = arcpy.da.SearchCursor(self.country, self.fields)
self.country_list = []
for row in cur:
self.country_name = row[0] + "-" + row[1]
self.country_list.append(self.country_name)
self.params[2].filter.list = self.country_list
# cur = arcpy.da.SearchCursor(self.project, self.prj_fields)
# self.project_list = []
# for row in cur:
# self.project_name = row[2] + "-" + row[5]
# self.project_list.append(self.project_name)
# self.params[3].filter.list = self.project_list
return
def updateParameters(self):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
self.city_list = []
self.country_list = []
if self.params[0].value:
pro_code = self.params[0].value.split('-')[1][:2]
self.expresscity = "ADMINCODE LIKE '{0}%'".format(pro_code)
cur = arcpy.da.SearchCursor(self.city, self.fields,self.expresscity)
for row in cur:
self.city_name = row[0]+"-"+row[1]
self.city_list.append(self.city_name)
self.params[1].filter.list = self.city_list
if self.params[1].value:
city_code = self.params[1].value.split('-')[1][:4]
self.expresscountry = "ADMINCODE LIKE '{0}%'".format(city_code)
cur = arcpy.da.SearchCursor(self.country, self.fields,self.expresscountry)
for row in cur:
self.country_name = row[0]+"-"+row[1]
self.country_list.append(self.country_name)
self.params[2].filter.list = self.country_list
return
def updateMessages(self):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
|
[
"arcpy.GetParameterInfo",
"os.path.join",
"arcpy.da.SearchCursor"
] |
[((294, 318), 'arcpy.GetParameterInfo', 'arcpy.GetParameterInfo', ([], {}), '()\n', (316, 318), False, 'import arcpy\n'), ((377, 422), 'os.path.join', 'os.path.join', (['self.current_path', '"""vector.sde"""'], {}), "(self.current_path, 'vector.sde')\n", (389, 422), False, 'import os\n'), ((442, 484), 'os.path.join', 'os.path.join', (['self.sdefile', '"""SDE.Boundary"""'], {}), "(self.sdefile, 'SDE.Boundary')\n", (454, 484), False, 'import os\n'), ((505, 544), 'os.path.join', 'os.path.join', (['self.boundary', '"""SDE.全国省界"""'], {}), "(self.boundary, 'SDE.全国省界')\n", (517, 544), False, 'import os\n'), ((560, 599), 'os.path.join', 'os.path.join', (['self.boundary', '"""SDE.全国市界"""'], {}), "(self.boundary, 'SDE.全国市界')\n", (572, 599), False, 'import os\n'), ((618, 658), 'os.path.join', 'os.path.join', (['self.boundary', '"""SDE.全国区县界"""'], {}), "(self.boundary, 'SDE.全国区县界')\n", (630, 658), False, 'import os\n'), ((677, 718), 'os.path.join', 'os.path.join', (['self.sdefile', '"""SDE.PROJECT"""'], {}), "(self.sdefile, 'SDE.PROJECT')\n", (689, 718), False, 'import os\n'), ((1021, 1070), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['self.province', 'self.fields'], {}), '(self.province, self.fields)\n', (1042, 1070), False, 'import arcpy\n'), ((1283, 1328), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['self.city', 'self.fields'], {}), '(self.city, self.fields)\n', (1304, 1328), False, 'import arcpy\n'), ((1526, 1574), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['self.country', 'self.fields'], {}), '(self.country, self.fields)\n', (1547, 1574), False, 'import arcpy\n'), ((2482, 2545), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['self.city', 'self.fields', 'self.expresscity'], {}), '(self.city, self.fields, self.expresscity)\n', (2503, 2545), False, 'import arcpy\n'), ((2893, 2962), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['self.country', 'self.fields', 'self.expresscountry'], {}), '(self.country, self.fields, self.expresscountry)\n', (2914, 2962), False, 'import arcpy\n')]
|
"""
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""
import os
import sys
import functools
from socket import AF_INET, AF_INET6, inet_ntop
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
MAX_CACHED_RESULTS = 2**24
class DHCP(Protocol):
def __init__(self, fp_database=None, config=None):
# populate fingerprint databases
self.fp_db = None
DHCP.static_data = set([0x35, 0x37])
DHCP.contextual_data = {0x03: ('router',lambda x: inet_ntop(AF_INET, x)),
0x06: ('domain_name_server',lambda x: inet_ntop(AF_INET, x)),
0x0c: ('hostname',lambda x: x.decode()),
0x0f: ('domain_name',lambda x: x.decode()),
0x32: ('requested_ip',lambda x: inet_ntop(AF_INET, x)),
0x3c: ('vendor_class_id',lambda x: x.decode())}
@staticmethod
def proto_identify(data, offset, data_len):
if data_len < 230:
return False
if (data[offset] != 0x01 or
data[offset+236] != 0x63 or
data[offset+237] != 0x82 or
data[offset+238] != 0x53 or
data[offset+239] != 0x63):
return False
return True
@staticmethod
def fingerprint(data, offset, data_len):
hardware_address_length = data[offset + 2]
cmac = data[offset+28:offset+28+hardware_address_length].hex()
context = [{'name': 'client_mac_address', 'data': '%s' % ':'.join(a+b for a,b in zip(cmac[::2], cmac[1::2]))}]
offset += 240
fp_ = '('
while offset < data_len:
kind = data[offset]
if kind == 0xff or kind == 0x00: # End / Padding
fp_ += '(%02x)' % kind
break
length = data[offset+1]
if kind in DHCP.contextual_data:
name_, transform_ = DHCP.contextual_data[kind]
context.append({'name':name_,
'data':transform_(data[offset+2:offset+2+length])})
if offset+length+2 >= data_len:
return None
if kind not in DHCP.static_data:
fp_ += '(%02x)' % kind
offset += length+2
continue
fp_ += '(%s)' % data[offset:offset+2+length].hex()
offset += length+2
fp_ += ')'
return fp_, context
|
[
"os.path.abspath",
"socket.inet_ntop"
] |
[((254, 279), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (269, 279), False, 'import os\n'), ((314, 339), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (329, 339), False, 'import os\n'), ((676, 697), 'socket.inet_ntop', 'inet_ntop', (['AF_INET', 'x'], {}), '(AF_INET, x)\n', (685, 697), False, 'from socket import AF_INET, AF_INET6, inet_ntop\n'), ((770, 791), 'socket.inet_ntop', 'inet_ntop', (['AF_INET', 'x'], {}), '(AF_INET, x)\n', (779, 791), False, 'from socket import AF_INET, AF_INET6, inet_ntop\n'), ((1007, 1028), 'socket.inet_ntop', 'inet_ntop', (['AF_INET', 'x'], {}), '(AF_INET, x)\n', (1016, 1028), False, 'from socket import AF_INET, AF_INET6, inet_ntop\n')]
|
from setuptools import setup, find_packages
setup(name='aCT',
version='0.1',
description='ARC Control Tower',
url='http://github.com/ARCControlTower/aCT',
python_requires='>=3.6',
author='aCT team',
author_email='<EMAIL>',
license='Apache 2.0',
package_dir = {'': 'src'},
packages=find_packages('src'),
install_requires=[
'mysql-connector-python', # connection to MySQL database
'htcondor', # bindings to use HTCondor to submit jobs
'pylint', # for travis automatic tests
'requests', # for APF mon calls
'prometheus_client', # Prometheus monitoring
'selinux', # SELinux context handling
'psutil', # Reports of process kills
'pyopenssl',
'flask',
'gunicorn',
'sqlalchemy'
],
entry_points={
'console_scripts': [
'actbootstrap = act.common.aCTBootstrap:main',
'actmain = act.common.aCTMain:main',
'actreport = act.common.aCTReport:main',
'actcriticalmonitor = act.common.aCTCriticalMonitor:main',
'actheartbeatwatchdog = act.atlas.aCTHeartbeatWatchdog:main',
'actldmxadmin = act.ldmx.aCTLDMXAdmin:main',
'actbulksub = act.client.actbulksub:main',
'actcat = act.client.actcat:main',
'actclean = act.client.actclean:main',
'actfetch = act.client.actfetch:main',
'actget = act.client.actget:main',
'actkill = act.client.actkill:main',
'actproxy = act.client.actproxy:main',
'actresub = act.client.actresub:main',
'actstat = act.client.actstat:main',
'actsub = act.client.actsub:main'
]
},
data_files=[
('etc/act', ['doc/aCTConfigARC.xml.template',
'doc/aCTConfigATLAS.xml.template'])
]
)
|
[
"setuptools.find_packages"
] |
[((336, 356), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (349, 356), False, 'from setuptools import setup, find_packages\n')]
|
from rest_framework import serializers
from blog.models import Post
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
fullName = serializers.SerializerMethodField()
class Meta:
model = User
fields = ['id','username','first_name','last_name','fullName']
def get_fullName(self,obj):
return obj.get_full_name()
post_detail_url = serializers.HyperlinkedIdentityField(
view_name = 'api:post-detail',
lookup_field = 'pk'
)
class PostSerializer(serializers.ModelSerializer):
detail_url = post_detail_url
author = UserSerializer()
likes_count = serializers.SerializerMethodField()
dislikes_count = serializers.SerializerMethodField()
class Meta:
model = Post
fields = '__all__'
def get_likes_count(self,obj):
return obj.total_likes()
def get_dislikes_count(self,obj):
return obj.total_dislikes()
class PostCreateSerializer(serializers.ModelSerializer):
detail_url = post_detail_url
class Meta:
model = Post
fields = ['name','body','image','detail_url']
|
[
"rest_framework.serializers.HyperlinkedIdentityField",
"rest_framework.serializers.SerializerMethodField"
] |
[((411, 499), 'rest_framework.serializers.HyperlinkedIdentityField', 'serializers.HyperlinkedIdentityField', ([], {'view_name': '"""api:post-detail"""', 'lookup_field': '"""pk"""'}), "(view_name='api:post-detail',\n lookup_field='pk')\n", (447, 499), False, 'from rest_framework import serializers\n'), ((179, 214), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (212, 214), False, 'from rest_framework import serializers\n'), ((643, 678), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (676, 678), False, 'from rest_framework import serializers\n'), ((700, 735), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (733, 735), False, 'from rest_framework import serializers\n')]
|
from checkvist.app import cli
import sys
sys.exit(cli.cli(prog_name='checkvist'))
|
[
"checkvist.app.cli.cli"
] |
[((51, 81), 'checkvist.app.cli.cli', 'cli.cli', ([], {'prog_name': '"""checkvist"""'}), "(prog_name='checkvist')\n", (58, 81), False, 'from checkvist.app import cli\n')]
|
import app
import Data.dataAnalysis as da
import Data.liveDataLAN as lan
import Data.liveDataNA as na
import time
#summonerName,server,lane = app.getUser()
def getDataServer(server,summonerName):
if server == "LAN":
summonerId = lan.gettingSummonerId(summonerName)
tier, rank = lan.getRankedPosition(summonerId)
return summonerId,tier,rank
else:
summonerId= na.gettingSummonerId(summonerName)
tier,rank = na.getRankedPosition(summonerId)
return summonerId,tier,rank
def refreshData(lane,server,summonerName,creepsPerMin,goldPerMin):
while True:
try:
summonerId,tier,rank = getDataServer(server,summonerName)
if server == "LAN":
gameTime = lan.gettingLiveScores(summonerId)
else:
gameTime = na.gettingLiveScores(summonerId)
creepsPerMin, goldPerMin = da.gettingAvgScores(gameTime,lane,tier,rank)
time.sleep(60)
except:
print("Matched Ended")
print("Your Score should look like")
deaths = da.getAvgDeaths(lane,tier,rank)
kills = da.getAvgKills(lane,tier,rank)
assists = da.getAvgAssists(lane,tier,rank)
wardsKilled = da.getAvgWardsKilled(lane,tier,rank)
wardsPlaced = da.getAvgWardsPlaced(lane,tier,rank)
print("Your KDA: "+ str(kills)+"/"+str(deaths)+"/"+str(assists))
print("Your wards placed: "+ str(wardsPlaced)+ " yes, wards are important even if you are not a support")
print("Your wads killed: "+ str(wardsKilled)+ "yes, even killing wards is important")
return deaths,kills,assists,wardsKilled,wardsPlaced
|
[
"Data.dataAnalysis.getAvgDeaths",
"Data.liveDataNA.gettingLiveScores",
"Data.liveDataNA.getRankedPosition",
"Data.dataAnalysis.getAvgKills",
"Data.dataAnalysis.gettingAvgScores",
"Data.liveDataLAN.gettingSummonerId",
"time.sleep",
"Data.dataAnalysis.getAvgAssists",
"Data.liveDataNA.gettingSummonerId",
"Data.liveDataLAN.gettingLiveScores",
"Data.dataAnalysis.getAvgWardsKilled",
"Data.dataAnalysis.getAvgWardsPlaced",
"Data.liveDataLAN.getRankedPosition"
] |
[((243, 278), 'Data.liveDataLAN.gettingSummonerId', 'lan.gettingSummonerId', (['summonerName'], {}), '(summonerName)\n', (264, 278), True, 'import Data.liveDataLAN as lan\n'), ((300, 333), 'Data.liveDataLAN.getRankedPosition', 'lan.getRankedPosition', (['summonerId'], {}), '(summonerId)\n', (321, 333), True, 'import Data.liveDataLAN as lan\n'), ((400, 434), 'Data.liveDataNA.gettingSummonerId', 'na.gettingSummonerId', (['summonerName'], {}), '(summonerName)\n', (420, 434), True, 'import Data.liveDataNA as na\n'), ((455, 487), 'Data.liveDataNA.getRankedPosition', 'na.getRankedPosition', (['summonerId'], {}), '(summonerId)\n', (475, 487), True, 'import Data.liveDataNA as na\n'), ((901, 948), 'Data.dataAnalysis.gettingAvgScores', 'da.gettingAvgScores', (['gameTime', 'lane', 'tier', 'rank'], {}), '(gameTime, lane, tier, rank)\n', (920, 948), True, 'import Data.dataAnalysis as da\n'), ((958, 972), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (968, 972), False, 'import time\n'), ((750, 783), 'Data.liveDataLAN.gettingLiveScores', 'lan.gettingLiveScores', (['summonerId'], {}), '(summonerId)\n', (771, 783), True, 'import Data.liveDataLAN as lan\n'), ((829, 861), 'Data.liveDataNA.gettingLiveScores', 'na.gettingLiveScores', (['summonerId'], {}), '(summonerId)\n', (849, 861), True, 'import Data.liveDataNA as na\n'), ((1094, 1127), 'Data.dataAnalysis.getAvgDeaths', 'da.getAvgDeaths', (['lane', 'tier', 'rank'], {}), '(lane, tier, rank)\n', (1109, 1127), True, 'import Data.dataAnalysis as da\n'), ((1146, 1178), 'Data.dataAnalysis.getAvgKills', 'da.getAvgKills', (['lane', 'tier', 'rank'], {}), '(lane, tier, rank)\n', (1160, 1178), True, 'import Data.dataAnalysis as da\n'), ((1199, 1233), 'Data.dataAnalysis.getAvgAssists', 'da.getAvgAssists', (['lane', 'tier', 'rank'], {}), '(lane, tier, rank)\n', (1215, 1233), True, 'import Data.dataAnalysis as da\n'), ((1258, 1296), 'Data.dataAnalysis.getAvgWardsKilled', 'da.getAvgWardsKilled', (['lane', 'tier', 'rank'], {}), '(lane, tier, rank)\n', (1278, 1296), True, 'import Data.dataAnalysis as da\n'), ((1321, 1359), 'Data.dataAnalysis.getAvgWardsPlaced', 'da.getAvgWardsPlaced', (['lane', 'tier', 'rank'], {}), '(lane, tier, rank)\n', (1341, 1359), True, 'import Data.dataAnalysis as da\n')]
|
# This module is a simply a wrapper for libftpy.so that acts as a remainder how its interface is defined
# C++ library libftpy must exist in same folder for this module to work
import libftpy
"""Get bounding boxes for FASText connected components found with given parameters
Parameters
----------
image : numpy array
Short int (0-255) valued grayscale image of size 1024x1024x1
coun : int
Maximum count of boxes that are returned - boxes with keypoints that have least amount of contrast are trimmed
scales : int
How many scales are used in the scale pyramid in addition of the original scale
threshold : int
Threshold use when defining a pixel is a FT keypoint or not
positives : bool
Are boxes found for positive ("bright") keypoints included in the results
negatives : bool
Are boxes found for negative ("dark") keypoints included in the results
wLimit : int
Boxes that are wider than wLimit are trimmed from the results
hLimit : int
Boxes that are higher than hLimit are trimmed from the results
Returns
-------
boxes : numpy array
Numpy array of size N * 4 representing the found boxes in format x, y, width, height (dtype is int32)
"""
def getKpBoxes(image, count, scales, threshold, positives, negatives, wLimit, hLimit):
padding = 0
return libftpy.getKpBoxes(image, padding, count, scales, threshold, positives, negatives, wLimit, hLimit)
"""Get FASText keypoints found with given parameters
Parameters
----------
image : numpy array
Short int (0-255) valued grayscale image of size 1024x1024
count : int
Maximum count of boxes that are returned - boxes with keypoints that have least amount of contrast are trimmed
scales : int
How many scales are used in the scale pyramid in addition of the original scale
threshold : int
Threshold use when defining a pixel is a FT keypoint or not
positives : bool
Are boxes found for positive ("bright") keypoints included in the results
negatives : bool
Are boxes found for negative ("dark") keypoints included in the results
icollector[y][x][0] = y; // y
icollector[y][x][1] = x; // x
icollector[y][x][2] = stats[0]; // kp type (end or bend)
icollector[y][x][3] = stats[1]; // lightess (positive or negative)
icollector[y][x][4] = stats[2]; // max contrast for nms
icollector[y][x][5] = stats[3]; // difference used in thresholding
Returns
-------
keypoints : numpy array
Numpy array of size N * 4 representing the found keypoints in format x, y, kp type (end=1, bend=2), kp lightness (positive=1, negative=2), difference for thresholding
"""
def getFTKeypoints(image, count, scales, threshold, positives, negatives):
padding = 0
return libftpy.getFTKeypoints(image, padding, count, scales, threshold, positives, negatives)
"""Cluster CC boxes using a custom distance algorithm (which can be found in <EMAIL>)
Parameters
----------
boxes : numpy array
int32 bounding boxes for connected components in format left, top, right, top, right, bottom, left, bottom
eps : floating point number
Epsilon (distance) parameter for the dbscan algorithm
min_samples : integer
How many points have be in some points neighbourhood to be a core point
Returns
-------
labels : numpy array
One-dimensional numpy array of cluster labels for each point
Nb! NOISE points have label -2
"""
def kpBoxDBSCAN(boxes, eps, min_samples):
padding = 0
boxN = len(boxes)
return libftpy.kpBoxDBSCAN(boxes, padding, boxN, eps, min_samples)
|
[
"libftpy.getFTKeypoints",
"libftpy.kpBoxDBSCAN",
"libftpy.getKpBoxes"
] |
[((1300, 1402), 'libftpy.getKpBoxes', 'libftpy.getKpBoxes', (['image', 'padding', 'count', 'scales', 'threshold', 'positives', 'negatives', 'wLimit', 'hLimit'], {}), '(image, padding, count, scales, threshold, positives,\n negatives, wLimit, hLimit)\n', (1318, 1402), False, 'import libftpy\n'), ((2726, 2816), 'libftpy.getFTKeypoints', 'libftpy.getFTKeypoints', (['image', 'padding', 'count', 'scales', 'threshold', 'positives', 'negatives'], {}), '(image, padding, count, scales, threshold, positives,\n negatives)\n', (2748, 2816), False, 'import libftpy\n'), ((3472, 3531), 'libftpy.kpBoxDBSCAN', 'libftpy.kpBoxDBSCAN', (['boxes', 'padding', 'boxN', 'eps', 'min_samples'], {}), '(boxes, padding, boxN, eps, min_samples)\n', (3491, 3531), False, 'import libftpy\n')]
|
"""
* This file contains source code for reading and extracting data from pdfs
* @author: <NAME>
"""
import fitz
from storage import enumrateFilenames
def readAllPdf():
"""
* @def: Read all the pdf files from the stotage and return the text from all in a list and the file name
* @return: List of tuple, pdf name and text data from all the pdfs
"""
pages = []
for pdf in enumrateFilenames():
with fitz.open(pdf) as infile:
for page in infile:
pages.append((pdf, page.getText()))
return pages
def readPdf(pdfname):
"""
* @def: Read a pdf file from the stotage and return the text from all in a list and the file name
* @param -> pdfname: path to the pdf
* @return: List of tuple, pdf name and text data from the pdf
"""
pages = []
with fitz.open(pdfname) as infile:
for page in infile:
pages.append((pdfname, page.getText()))
return pages
|
[
"fitz.open",
"storage.enumrateFilenames"
] |
[((396, 415), 'storage.enumrateFilenames', 'enumrateFilenames', ([], {}), '()\n', (413, 415), False, 'from storage import enumrateFilenames\n'), ((829, 847), 'fitz.open', 'fitz.open', (['pdfname'], {}), '(pdfname)\n', (838, 847), False, 'import fitz\n'), ((430, 444), 'fitz.open', 'fitz.open', (['pdf'], {}), '(pdf)\n', (439, 444), False, 'import fitz\n')]
|
r"""
Deep Learning for Astronomers with Tensorflow
"""
from pkg_resources import get_distribution
version = __version__ = get_distribution('astroNN').version
|
[
"pkg_resources.get_distribution"
] |
[((124, 151), 'pkg_resources.get_distribution', 'get_distribution', (['"""astroNN"""'], {}), "('astroNN')\n", (140, 151), False, 'from pkg_resources import get_distribution\n')]
|
# This script is to run automate running machline for the Weber and Brebner results
import numpy as np
import json
import subprocess
import time
import multiprocessing as mp
import os
# Record and print the time required to run MachLine
start_time = time.time()
def mach_iter(AoA, Node, formulation, freestream):
if formulation == "source-free":
formulation_adjusted = "source_free"
else:
formulation_adjusted = formulation
# Modify freestream velocities based on angle of attack
AoA_rad = float(AoA)*np.pi/180
x_flow = freestream * np.cos(AoA_rad)
z_flow = freestream * np.sin(AoA_rad)
# Identify filebases used throughout iterator
filebase = "dev/results/half_wing_swept_45_deg/"
output_filebase = filebase + "MachLine_Results/" + AoA + "_degrees_AoA/half_wing_A_" + Node + "_nodes_" + AoA + "_deg_AoA_" + formulation_adjusted
# Rewrite the input files based on angle of attack and node densities
dict1 = {
"flow": {
"freestream_velocity": [
x_flow,
0.0,
z_flow
]
},
"geometry": {
"file": filebase + "half_wing_A_meshes/half_wing_A_" + Node + "_nodes.vtk",
"mirror_about": "xz",
"singularity_order": {
"doublet": 1,
"source": 0
},
"wake_model": {
"wake_shedding_angle": 90.0,
"trefftz_distance": 10000.0,
"N_panels": 1
},
"reference": {
"area": 1.0
}
},
"solver": {
"formulation": formulation,
"control_point_offset": 1.1e-05
},
"post_processing" : {
},
"output": {
"body_file": output_filebase + "_formulation.vtk",
"wake_file": output_filebase + "_formulation_wake.vtk",
"control_point_file": output_filebase + "_control_points.vtk",
"report_file": "../../report.txt"
}
}
# Identify output file location
filename = AoA + "_deg_angle_of_attack_input.json"
inputfile = filebase + 'half_wing_A_swept_inputs/' + filename
# file_location = "dev/results/half_wing_swept_45deg/test/" + AoA + "_degree_AoA_test_file_" + Node + "_nodes.json"
with open(inputfile, "w") as output_file:
json.dump(dict1, output_file, indent=4)
print("\n***",Node, "node input file saved successfully ***\n")
# Run machline with current input file
# machline_command = "./machline.exe {0}".format(inputfile)
subprocess.call(["./machline.exe", inputfile])
## Main
input_conditions = "Swept_half_wing_conditions_input.json"
json_string = open(input_conditions).read()
json_vals = json.loads(json_string)
# Identify values to pass from input conditions file
Nodes_input = json_vals["geometry"]["nodes"]
AoA_list_input = json_vals["geometry"]["AoA list"]
freestream_velocity = json_vals["flow conditions"]["freestream velocity"]
formulation_input = json_vals["solver"]["formulation"]
# Identify number of CPU available to work with
# n_processors = mp.cpu_count()
n_processors = 8
Arguments = []
# Change the working directory to the main MachLine directory for execution
os.chdir("../../../")
# Call the machline iterator with the desired inputs
with mp.Pool(n_processors) as pool:
for form in formulation_input:
for AoA in AoA_list_input:
for node in Nodes_input:
Arguments.append((AoA, node, form, freestream_velocity))
pool.starmap(mach_iter, Arguments)
pool.join()
# mach_iter(AoA_list_input, Nodes_input, formulation_input, freestream_velocity)
print("MachLine Iterator executed successfully in %s seconds" % "{:.4f}".format(time.time()-start_time))
|
[
"json.dump",
"json.loads",
"time.time",
"numpy.sin",
"subprocess.call",
"numpy.cos",
"multiprocessing.Pool",
"os.chdir"
] |
[((252, 263), 'time.time', 'time.time', ([], {}), '()\n', (261, 263), False, 'import time\n'), ((2816, 2839), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (2826, 2839), False, 'import json\n'), ((3313, 3334), 'os.chdir', 'os.chdir', (['"""../../../"""'], {}), "('../../../')\n", (3321, 3334), False, 'import os\n'), ((2642, 2688), 'subprocess.call', 'subprocess.call', (["['./machline.exe', inputfile]"], {}), "(['./machline.exe', inputfile])\n", (2657, 2688), False, 'import subprocess\n'), ((3394, 3415), 'multiprocessing.Pool', 'mp.Pool', (['n_processors'], {}), '(n_processors)\n', (3401, 3415), True, 'import multiprocessing as mp\n'), ((580, 595), 'numpy.cos', 'np.cos', (['AoA_rad'], {}), '(AoA_rad)\n', (586, 595), True, 'import numpy as np\n'), ((622, 637), 'numpy.sin', 'np.sin', (['AoA_rad'], {}), '(AoA_rad)\n', (628, 637), True, 'import numpy as np\n'), ((2412, 2451), 'json.dump', 'json.dump', (['dict1', 'output_file'], {'indent': '(4)'}), '(dict1, output_file, indent=4)\n', (2421, 2451), False, 'import json\n'), ((3828, 3839), 'time.time', 'time.time', ([], {}), '()\n', (3837, 3839), False, 'import time\n')]
|
# Automated tests for the `coloredlogs' package.
#
# Author: <NAME> <<EMAIL>>
# Last Change: November 14, 2015
# URL: https://coloredlogs.readthedocs.org
"""Automated tests for the `coloredlogs` package."""
# Standard library modules.
import logging
import logging.handlers
import os
import random
import re
import string
import sys
import tempfile
import unittest
# External dependencies.
from humanfriendly.terminal import ansi_wrap
# The module we're testing.
import coloredlogs
import coloredlogs.cli
from coloredlogs import (
CHROOT_FILES,
decrease_verbosity,
find_defined_levels,
find_handler,
find_hostname,
find_program_name,
get_level,
increase_verbosity,
install,
is_verbose,
level_to_number,
NameNormalizer,
parse_encoded_styles,
set_level,
walk_propagation_tree,
)
from coloredlogs.syslog import SystemLogging
from coloredlogs.converter import capture, convert
# External test dependencies.
from capturer import CaptureOutput
from verboselogs import VerboseLogger
from humanfriendly.compat import StringIO
# Compiled regular expression that matches a single line of output produced by
# the default log format (does not include matching of ANSI escape sequences).
PLAIN_TEXT_PATTERN = re.compile(r'''
(?P<date> \d{4}-\d{2}-\d{2} )
\s (?P<time> \d{2}:\d{2}:\d{2} )
\s (?P<hostname> \S+ )
\s (?P<logger_name> \w+ )
\[ (?P<process_id> \d+ ) \]
\s (?P<severity> [A-Z]+ )
\s (?P<message> .* )
''', re.VERBOSE)
def setUpModule():
"""Speed up the tests by disabling the demo's artificial delay."""
os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'
coloredlogs.demo.DEMO_DELAY = 0
class ColoredLogsTestCase(unittest.TestCase):
"""Container for the `coloredlogs` tests."""
def test_level_to_number(self):
"""Make sure :func:`level_to_number()` works as intended."""
# Make sure the default levels are translated as expected.
assert level_to_number('debug') == logging.DEBUG
assert level_to_number('info') == logging.INFO
assert level_to_number('warn') == logging.WARNING
assert level_to_number('error') == logging.ERROR
assert level_to_number('fatal') == logging.FATAL
# Make sure bogus level names don't blow up.
assert level_to_number('bogus-level') == logging.INFO
def test_find_hostname(self):
"""Make sure :func:`~find_hostname()` works correctly."""
assert find_hostname()
# Create a temporary file as a placeholder for e.g. /etc/debian_chroot.
fd, temporary_file = tempfile.mkstemp()
try:
with open(temporary_file, 'w') as handle:
handle.write('first line\n')
handle.write('second line\n')
CHROOT_FILES.insert(0, temporary_file)
# Make sure the chroot file is being read.
assert find_hostname() == 'first line'
finally:
# Clean up.
CHROOT_FILES.pop(0)
os.unlink(temporary_file)
# Test that unreadable chroot files don't break coloredlogs.
try:
CHROOT_FILES.insert(0, temporary_file)
# Make sure that a usable value is still produced.
assert find_hostname()
finally:
# Clean up.
CHROOT_FILES.pop(0)
def test_host_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.HostNameFilter()`."""
install(fmt='%(hostname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_hostname() in output
def test_program_name_filter(self):
"""Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
install(fmt='%(programname)s')
with CaptureOutput() as capturer:
logging.info("A truly insignificant message ..")
output = capturer.get_text()
assert find_program_name() in output
def test_system_logging(self):
"""Make sure the :mod:`coloredlogs.syslog` module works."""
expected_message = random_string(50)
with SystemLogging(programname='coloredlogs-test-suite') as syslog:
logging.info("%s", expected_message)
if syslog and os.path.isfile('/var/log/syslog'):
with open('/var/log/syslog') as handle:
assert any(expected_message in line for line in handle)
def test_name_normalization(self):
"""Make sure :class:`~coloredlogs.NameNormalizer` works as intended."""
nn = NameNormalizer()
for canonical_name in ['debug', 'info', 'warning', 'error', 'critical']:
assert nn.normalize_name(canonical_name) == canonical_name
assert nn.normalize_name(canonical_name.upper()) == canonical_name
assert nn.normalize_name('warn') == 'warning'
assert nn.normalize_name('fatal') == 'critical'
def test_style_parsing(self):
"""Make sure :func:`~coloredlogs.parse_encoded_styles()` works as intended."""
encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
decoded_styles = parse_encoded_styles(encoded_styles, normalize_key=lambda k: k.upper())
assert sorted(decoded_styles.keys()) == sorted(['debug', 'warning', 'error', 'critical'])
assert decoded_styles['debug']['color'] == 'green'
assert decoded_styles['warning']['color'] == 'yellow'
assert decoded_styles['error']['color'] == 'red'
assert decoded_styles['critical']['color'] == 'red'
assert decoded_styles['critical']['bold'] is True
def test_is_verbose(self):
"""Make sure is_verbose() does what it should :-)."""
set_level(logging.INFO)
assert not is_verbose()
set_level(logging.DEBUG)
assert is_verbose()
set_level(logging.VERBOSE)
assert is_verbose()
def test_increase_verbosity(self):
"""Make sure increase_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> VERBOSE.
increase_verbosity()
assert get_level() == logging.VERBOSE
# VERBOSE -> DEBUG.
increase_verbosity()
assert get_level() == logging.DEBUG
# DEBUG -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
# NOTSET -> NOTSET.
increase_verbosity()
assert get_level() == logging.NOTSET
def test_decrease_verbosity(self):
"""Make sure decrease_verbosity() respects default and custom levels."""
# Start from a known state.
set_level(logging.INFO)
assert get_level() == logging.INFO
# INFO -> WARNING.
decrease_verbosity()
assert get_level() == logging.WARNING
# WARNING -> ERROR.
decrease_verbosity()
assert get_level() == logging.ERROR
# ERROR -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
# CRITICAL -> CRITICAL.
decrease_verbosity()
assert get_level() == logging.CRITICAL
def test_level_discovery(self):
"""Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
defined_levels = find_defined_levels()
level_values = defined_levels.values()
for number in (0, 10, 20, 30, 40, 50):
assert number in level_values
def test_walk_propagation_tree(self):
"""Make sure walk_propagation_tree() properly walks the tree of loggers."""
root, parent, child, grand_child = self.get_logger_tree()
# Check the default mode of operation.
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child, parent, root]
# Now change the propagation (non-default mode of operation).
child.propagate = False
loggers = list(walk_propagation_tree(grand_child))
assert loggers == [grand_child, child]
def test_find_handler(self):
"""Make sure find_handler() works as intended."""
root, parent, child, grand_child = self.get_logger_tree()
# Add some handlers to the tree.
stream_handler = logging.StreamHandler()
syslog_handler = logging.handlers.SysLogHandler()
child.addHandler(stream_handler)
parent.addHandler(syslog_handler)
# Make sure the first matching handler is returned.
matched_handler, matched_logger = find_handler(grand_child, lambda h: isinstance(h, logging.Handler))
assert matched_handler is stream_handler
# Make sure the first matching handler of the given type is returned.
matched_handler, matched_logger = find_handler(child, lambda h: isinstance(h, logging.handlers.SysLogHandler))
assert matched_handler is syslog_handler
def get_logger_tree(self):
"""Create and return a tree of loggers."""
# Get the root logger.
root = logging.getLogger()
# Create a top level logger for ourselves.
parent_name = random_string()
parent = logging.getLogger(parent_name)
# Create a child logger.
child_name = '%s.%s' % (parent_name, random_string())
child = logging.getLogger(child_name)
# Create a grand child logger.
grand_child_name = '%s.%s' % (child_name, random_string())
grand_child = logging.getLogger(grand_child_name)
return root, parent, child, grand_child
def test_plain_text_output_format(self):
"""Inspect the plain text output of coloredlogs."""
logger = VerboseLogger(random_string(25))
stream = StringIO()
install(level=logging.NOTSET, logger=logger, stream=stream)
# Test that filtering on severity works.
logger.setLevel(logging.INFO)
logger.debug("No one should see this message.")
assert len(stream.getvalue().strip()) == 0
# Test that the default output format looks okay in plain text.
logger.setLevel(logging.NOTSET)
for method, severity in ((logger.debug, 'DEBUG'),
(logger.info, 'INFO'),
(logger.verbose, 'VERBOSE'),
(logger.warning, 'WARN'),
(logger.error, 'ERROR'),
(logger.critical, 'CRITICAL')):
# Prepare the text.
text = "This is a message with severity %r." % severity.lower()
# Log the message with the given severity.
method(text)
# Get the line of output generated by the handler.
output = stream.getvalue()
lines = output.splitlines()
last_line = lines[-1]
assert text in last_line
assert severity in last_line
assert PLAIN_TEXT_PATTERN.match(last_line)
def test_html_conversion(self):
"""Check the conversion from ANSI escape sequences to HTML."""
ansi_encoded_text = 'I like %s - www.eelstheband.com' % ansi_wrap('birds', bold=True, color='blue')
assert ansi_encoded_text == 'I like \x1b[1;34mbirds\x1b[0m - www.eelstheband.com'
html_encoded_text = convert(ansi_encoded_text)
assert html_encoded_text == (
'I like <span style="font-weight: bold; color: blue;">birds</span> - '
'<a href="http://www.eelstheband.com" style="color: inherit;">www.eelstheband.com</a>'
)
def test_output_interception(self):
"""Test capturing of output from external commands."""
expected_output = 'testing, 1, 2, 3 ..'
assert capture(['sh', '-c', 'echo -n %s' % expected_output]) == expected_output
def test_cli_demo(self):
"""Test the command line colored logging demonstration."""
with CaptureOutput() as capturer:
main('coloredlogs', '--demo')
output = capturer.get_text()
# Make sure the output contains all of the expected logging level names.
for name in 'debug', 'info', 'warning', 'error', 'critical':
assert name.upper() in output
def test_cli_conversion(self):
"""Test the command line HTML conversion."""
output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)
# Make sure the output is encoded as HTML.
assert '<span' in output
def test_implicit_usage_message(self):
"""Test that the usage message is shown when no actions are given."""
assert 'Usage:' in main('coloredlogs', capture=True)
def test_explicit_usage_message(self):
"""Test that the usage message is shown when ``--help`` is given."""
assert 'Usage:' in main('coloredlogs', '--help', capture=True)
def main(*arguments, **options):
"""Simple wrapper to run the command line interface."""
capture = options.get('capture', False)
saved_argv = sys.argv
saved_stdout = sys.stdout
try:
sys.argv = arguments
if capture:
sys.stdout = StringIO()
coloredlogs.cli.main()
if capture:
return sys.stdout.getvalue()
finally:
sys.argv = saved_argv
sys.stdout = saved_stdout
def random_string(length=25):
"""Generate a random string."""
return ''.join(random.choice(string.ascii_letters) for i in range(25))
|
[
"coloredlogs.get_level",
"coloredlogs.converter.capture",
"os.unlink",
"coloredlogs.CHROOT_FILES.pop",
"os.path.isfile",
"logging.handlers.SysLogHandler",
"coloredlogs.increase_verbosity",
"coloredlogs.find_program_name",
"coloredlogs.find_defined_levels",
"humanfriendly.compat.StringIO",
"coloredlogs.converter.convert",
"coloredlogs.set_level",
"coloredlogs.decrease_verbosity",
"coloredlogs.walk_propagation_tree",
"coloredlogs.syslog.SystemLogging",
"logging.StreamHandler",
"coloredlogs.cli.main",
"coloredlogs.is_verbose",
"sys.stdout.getvalue",
"humanfriendly.terminal.ansi_wrap",
"re.compile",
"coloredlogs.level_to_number",
"tempfile.mkstemp",
"coloredlogs.CHROOT_FILES.insert",
"coloredlogs.install",
"random.choice",
"capturer.CaptureOutput",
"logging.info",
"coloredlogs.NameNormalizer",
"coloredlogs.find_hostname",
"logging.getLogger"
] |
[((1263, 1535), 're.compile', 're.compile', (['"""\n (?P<date> \\\\d{4}-\\\\d{2}-\\\\d{2} )\n \\\\s (?P<time> \\\\d{2}:\\\\d{2}:\\\\d{2} )\n \\\\s (?P<hostname> \\\\S+ )\n \\\\s (?P<logger_name> \\\\w+ )\n \\\\[ (?P<process_id> \\\\d+ ) \\\\]\n \\\\s (?P<severity> [A-Z]+ )\n \\\\s (?P<message> .* )\n"""', 're.VERBOSE'], {}), '(\n """\n (?P<date> \\\\d{4}-\\\\d{2}-\\\\d{2} )\n \\\\s (?P<time> \\\\d{2}:\\\\d{2}:\\\\d{2} )\n \\\\s (?P<hostname> \\\\S+ )\n \\\\s (?P<logger_name> \\\\w+ )\n \\\\[ (?P<process_id> \\\\d+ ) \\\\]\n \\\\s (?P<severity> [A-Z]+ )\n \\\\s (?P<message> .* )\n"""\n , re.VERBOSE)\n', (1273, 1535), False, 'import re\n'), ((2472, 2487), 'coloredlogs.find_hostname', 'find_hostname', ([], {}), '()\n', (2485, 2487), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2597, 2615), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (2613, 2615), False, 'import tempfile\n'), ((3490, 3517), 'coloredlogs.install', 'install', ([], {'fmt': '"""%(hostname)s"""'}), "(fmt='%(hostname)s')\n", (3497, 3517), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3857, 3887), 'coloredlogs.install', 'install', ([], {'fmt': '"""%(programname)s"""'}), "(fmt='%(programname)s')\n", (3864, 3887), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((4681, 4697), 'coloredlogs.NameNormalizer', 'NameNormalizer', ([], {}), '()\n', (4695, 4697), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((5836, 5859), 'coloredlogs.set_level', 'set_level', (['logging.INFO'], {}), '(logging.INFO)\n', (5845, 5859), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((5900, 5924), 'coloredlogs.set_level', 'set_level', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (5909, 5924), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((5940, 5952), 'coloredlogs.is_verbose', 'is_verbose', ([], {}), '()\n', (5950, 5952), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((5961, 5987), 'coloredlogs.set_level', 'set_level', (['logging.VERBOSE'], {}), '(logging.VERBOSE)\n', (5970, 5987), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6003, 6015), 'coloredlogs.is_verbose', 'is_verbose', ([], {}), '()\n', (6013, 6015), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6181, 6204), 'coloredlogs.set_level', 'set_level', (['logging.INFO'], {}), '(logging.INFO)\n', (6190, 6204), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6283, 6303), 'coloredlogs.increase_verbosity', 'increase_verbosity', ([], {}), '()\n', (6301, 6303), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6386, 6406), 'coloredlogs.increase_verbosity', 'increase_verbosity', ([], {}), '()\n', (6404, 6406), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6486, 6506), 'coloredlogs.increase_verbosity', 'increase_verbosity', ([], {}), '()\n', (6504, 6506), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6588, 6608), 'coloredlogs.increase_verbosity', 'increase_verbosity', ([], {}), '()\n', (6606, 6608), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6819, 6842), 'coloredlogs.set_level', 'set_level', (['logging.INFO'], {}), '(logging.INFO)\n', (6828, 6842), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6921, 6941), 'coloredlogs.decrease_verbosity', 'decrease_verbosity', ([], {}), '()\n', (6939, 6941), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7024, 7044), 'coloredlogs.decrease_verbosity', 'decrease_verbosity', ([], {}), '()\n', (7042, 7044), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7126, 7146), 'coloredlogs.decrease_verbosity', 'decrease_verbosity', ([], {}), '()\n', (7144, 7146), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7234, 7254), 'coloredlogs.decrease_verbosity', 'decrease_verbosity', ([], {}), '()\n', (7252, 7254), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7474, 7495), 'coloredlogs.find_defined_levels', 'find_defined_levels', ([], {}), '()\n', (7493, 7495), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((8424, 8447), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8445, 8447), False, 'import logging\n'), ((8473, 8505), 'logging.handlers.SysLogHandler', 'logging.handlers.SysLogHandler', ([], {}), '()\n', (8503, 8505), False, 'import logging\n'), ((9183, 9202), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (9200, 9202), False, 'import logging\n'), ((9309, 9339), 'logging.getLogger', 'logging.getLogger', (['parent_name'], {}), '(parent_name)\n', (9326, 9339), False, 'import logging\n'), ((9451, 9480), 'logging.getLogger', 'logging.getLogger', (['child_name'], {}), '(child_name)\n', (9468, 9480), False, 'import logging\n'), ((9609, 9644), 'logging.getLogger', 'logging.getLogger', (['grand_child_name'], {}), '(grand_child_name)\n', (9626, 9644), False, 'import logging\n'), ((9866, 9876), 'humanfriendly.compat.StringIO', 'StringIO', ([], {}), '()\n', (9874, 9876), False, 'from humanfriendly.compat import StringIO\n'), ((9885, 9944), 'coloredlogs.install', 'install', ([], {'level': 'logging.NOTSET', 'logger': 'logger', 'stream': 'stream'}), '(level=logging.NOTSET, logger=logger, stream=stream)\n', (9892, 9944), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((11440, 11466), 'coloredlogs.converter.convert', 'convert', (['ansi_encoded_text'], {}), '(ansi_encoded_text)\n', (11447, 11466), False, 'from coloredlogs.converter import capture, convert\n'), ((13305, 13327), 'coloredlogs.cli.main', 'coloredlogs.cli.main', ([], {}), '()\n', (13325, 13327), False, 'import coloredlogs\n'), ((1972, 1996), 'coloredlogs.level_to_number', 'level_to_number', (['"""debug"""'], {}), "('debug')\n", (1987, 1996), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2029, 2052), 'coloredlogs.level_to_number', 'level_to_number', (['"""info"""'], {}), "('info')\n", (2044, 2052), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2084, 2107), 'coloredlogs.level_to_number', 'level_to_number', (['"""warn"""'], {}), "('warn')\n", (2099, 2107), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2142, 2166), 'coloredlogs.level_to_number', 'level_to_number', (['"""error"""'], {}), "('error')\n", (2157, 2166), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2199, 2223), 'coloredlogs.level_to_number', 'level_to_number', (['"""fatal"""'], {}), "('fatal')\n", (2214, 2223), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2309, 2339), 'coloredlogs.level_to_number', 'level_to_number', (['"""bogus-level"""'], {}), "('bogus-level')\n", (2324, 2339), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2786, 2824), 'coloredlogs.CHROOT_FILES.insert', 'CHROOT_FILES.insert', (['(0)', 'temporary_file'], {}), '(0, temporary_file)\n', (2805, 2824), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((2984, 3003), 'coloredlogs.CHROOT_FILES.pop', 'CHROOT_FILES.pop', (['(0)'], {}), '(0)\n', (3000, 3003), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3016, 3041), 'os.unlink', 'os.unlink', (['temporary_file'], {}), '(temporary_file)\n', (3025, 3041), False, 'import os\n'), ((3136, 3174), 'coloredlogs.CHROOT_FILES.insert', 'CHROOT_FILES.insert', (['(0)', 'temporary_file'], {}), '(0, temporary_file)\n', (3155, 3174), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3257, 3272), 'coloredlogs.find_hostname', 'find_hostname', ([], {}), '()\n', (3270, 3272), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3326, 3345), 'coloredlogs.CHROOT_FILES.pop', 'CHROOT_FILES.pop', (['(0)'], {}), '(0)\n', (3342, 3345), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3531, 3546), 'capturer.CaptureOutput', 'CaptureOutput', ([], {}), '()\n', (3544, 3546), False, 'from capturer import CaptureOutput\n'), ((3572, 3620), 'logging.info', 'logging.info', (['"""A truly insignificant message .."""'], {}), "('A truly insignificant message ..')\n", (3584, 3620), False, 'import logging\n'), ((3901, 3916), 'capturer.CaptureOutput', 'CaptureOutput', ([], {}), '()\n', (3914, 3916), False, 'from capturer import CaptureOutput\n'), ((3942, 3990), 'logging.info', 'logging.info', (['"""A truly insignificant message .."""'], {}), "('A truly insignificant message ..')\n", (3954, 3990), False, 'import logging\n'), ((4243, 4294), 'coloredlogs.syslog.SystemLogging', 'SystemLogging', ([], {'programname': '"""coloredlogs-test-suite"""'}), "(programname='coloredlogs-test-suite')\n", (4256, 4294), False, 'from coloredlogs.syslog import SystemLogging\n'), ((4318, 4354), 'logging.info', 'logging.info', (['"""%s"""', 'expected_message'], {}), "('%s', expected_message)\n", (4330, 4354), False, 'import logging\n'), ((5879, 5891), 'coloredlogs.is_verbose', 'is_verbose', ([], {}), '()\n', (5889, 5891), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6220, 6231), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6229, 6231), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6319, 6330), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6328, 6330), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6422, 6433), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6431, 6433), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6522, 6533), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6531, 6533), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6624, 6635), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6633, 6635), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6858, 6869), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6867, 6869), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((6957, 6968), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (6966, 6968), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7060, 7071), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (7069, 7071), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7162, 7173), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (7171, 7173), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7270, 7281), 'coloredlogs.get_level', 'get_level', ([], {}), '()\n', (7279, 7281), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((7895, 7929), 'coloredlogs.walk_propagation_tree', 'walk_propagation_tree', (['grand_child'], {}), '(grand_child)\n', (7916, 7929), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((8117, 8151), 'coloredlogs.walk_propagation_tree', 'walk_propagation_tree', (['grand_child'], {}), '(grand_child)\n', (8138, 8151), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((11278, 11321), 'humanfriendly.terminal.ansi_wrap', 'ansi_wrap', (['"""birds"""'], {'bold': '(True)', 'color': '"""blue"""'}), "('birds', bold=True, color='blue')\n", (11287, 11321), False, 'from humanfriendly.terminal import ansi_wrap\n'), ((11884, 11937), 'coloredlogs.converter.capture', 'capture', (["['sh', '-c', 'echo -n %s' % expected_output]"], {}), "(['sh', '-c', 'echo -n %s' % expected_output])\n", (11891, 11937), False, 'from coloredlogs.converter import capture, convert\n'), ((12067, 12082), 'capturer.CaptureOutput', 'CaptureOutput', ([], {}), '()\n', (12080, 12082), False, 'from capturer import CaptureOutput\n'), ((13286, 13296), 'humanfriendly.compat.StringIO', 'StringIO', ([], {}), '()\n', (13294, 13296), False, 'from humanfriendly.compat import StringIO\n'), ((13367, 13388), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (13386, 13388), False, 'import sys\n'), ((13553, 13588), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (13566, 13588), False, 'import random\n'), ((2899, 2914), 'coloredlogs.find_hostname', 'find_hostname', ([], {}), '()\n', (2912, 2914), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((3681, 3696), 'coloredlogs.find_hostname', 'find_hostname', ([], {}), '()\n', (3694, 3696), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((4051, 4070), 'coloredlogs.find_program_name', 'find_program_name', ([], {}), '()\n', (4068, 4070), False, 'from coloredlogs import CHROOT_FILES, decrease_verbosity, find_defined_levels, find_handler, find_hostname, find_program_name, get_level, increase_verbosity, install, is_verbose, level_to_number, NameNormalizer, parse_encoded_styles, set_level, walk_propagation_tree\n'), ((4381, 4414), 'os.path.isfile', 'os.path.isfile', (['"""/var/log/syslog"""'], {}), "('/var/log/syslog')\n", (4395, 4414), False, 'import os\n')]
|
# This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by <NAME> <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - <NAME> <https://github.com/unkn0wn-user>
# - <NAME> <https://github.com/Connyko65>
# - <NAME> <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
import uuid
from nesi.devices.softbox.api import db
class PortProfile(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(64))
description = db.Column(db.String())
box_id = db.Column(db.Integer, db.ForeignKey('box.id'))
type = db.Column(db.Enum('service', 'spectrum', 'dpbo', 'rtx', 'vect', 'sos', 'ghs', 'qos', 'policer', 'vce',
'data-rate', 'noise-margin', 'inp-delay', 'mode-specific-psd'))
# Alcatel Data
up_policer = db.Column(db.String(), default=None, nullable=True)
down_policer = db.Column(db.String(), default=None, nullable=True)
committed_info_rate = db.Column(db.Integer(), default=0, nullable=False)
committed_burst_size = db.Column(db.Integer(), default=0, nullable=False)
logical_flow_type = db.Column(db.Enum('generic'), default='generic')
# Huawei data
maximum_bit_error_ratio = db.Column(db.Integer(), default=None)
path_mode = db.Column(db.Integer(), default=None)
rate = db.Column(db.String(), default=None)
etr_max = db.Column(db.Integer(), default=None)
etr_min = db.Column(db.Integer(), default=None)
ndr_max = db.Column(db.Integer(), default=None)
working_mode = db.Column(db.Integer(), default=None)
eside_electrical_length = db.Column(db.String(), default=None)
assumed_exchange_psd = db.Column(db.String(), default=None)
eside_cable_model = db.Column(db.String(), default=None)
min_usable_signal = db.Column(db.Integer(), default=None)
span_frequency = db.Column(db.String(), default=None)
dpbo_calculation = db.Column(db.Integer(), default=None)
snr_margin = db.Column(db.String(), default=None)
rate_adapt = db.Column(db.String(), default=None)
snr_mode = db.Column(db.String(), default=None)
inp_4khz = db.Column(db.String(), default=None)
inp_8khz = db.Column(db.String(), default=None)
interleaved_delay = db.Column(db.String(), default=None)
delay_variation = db.Column(db.Integer(), default=None)
channel_policy = db.Column(db.Integer(), default=None)
nominal_transmit_PSD_ds = db.Column(db.Integer(), default=None)
nominal_transmit_PSD_us = db.Column(db.Integer(), default=None)
aggregate_transmit_power_ds = db.Column(db.Integer(), default=None)
aggregate_transmit_power_us = db.Column(db.Integer(), default=None)
aggregate_receive_power_us = db.Column(db.Integer(), default=None)
upstream_psd_mask_selection = db.Column(db.Integer(), default=None)
psd_class_mask = db.Column(db.Integer(), default=None)
psd_limit_mask = db.Column(db.Integer(), default=None)
l0_time = db.Column(db.Integer(), default=None)
l2_time = db.Column(db.Integer(), default=None)
l3_time = db.Column(db.Integer(), default=None)
max_transmite_power_reduction = db.Column(db.Integer(), default=None)
total_max_power_reduction = db.Column(db.Integer(), default=None)
bit_swap_ds = db.Column(db.Integer(), default=None)
bit_swap_us = db.Column(db.Integer(), default=None)
overhead_datarate_us = db.Column(db.Integer(), default=None)
overhead_datarate_ds = db.Column(db.Integer(), default=None)
allow_transitions_to_idle = db.Column(db.Integer(), default=None)
allow_transitions_to_lowpower = db.Column(db.Integer(), default=None)
reference_clock = db.Column(db.String(), default=None)
cyclic_extension_flag = db.Column(db.Integer(), default=None)
force_inp_ds = db.Column(db.Integer(), default=None)
force_inp_us = db.Column(db.Integer(), default=None)
g_993_2_profile = db.Column(db.Integer(), default=None)
mode_specific = db.Column(db.String(), default=None)
transmode = db.Column(db.String(), default=None)
T1_413 = db.Column(db.String(), default=None)
G_992_1 = db.Column(db.String(), default=None)
G_992_2 = db.Column(db.String(), default=None)
G_992_3 = db.Column(db.String(), default=None)
G_992_4 = db.Column(db.String(), default=None)
G_992_5 = db.Column(db.String(), default=None)
AnnexB_G_993_2 = db.Column(db.String(), default=None)
ETSI = db.Column(db.String(), default=None)
us0_psd_mask = db.Column(db.Integer(), default=None)
vdsltoneblackout = db.Column(db.String(), default=None)
internal_id = db.Column(db.Integer(), default=None)
vmac_ipoe = db.Column(db.Enum('enable', 'disable'), default=None)
vmac_pppoe = db.Column(db.Enum('enable', 'disable'), default=None)
vmac_pppoa = db.Column(db.Enum('enable', 'disable'), default=None)
vlan_mac = db.Column(db.Enum('forwarding', 'discard'), default=None)
packet_policy_multicast = db.Column(db.Enum('forward', 'discard'), default=None)
packet_policy_unicast = db.Column(db.Enum('forward', 'discard'), default=None)
security_anti_ipspoofing = db.Column(db.Enum('enable', 'disable'), default=None)
security_anti_macspoofing = db.Column(db.Enum('enable', 'disable'), default=None)
igmp_mismatch = db.Column(db.Enum('transparent'), default=None)
commit = db.Column(db.Boolean(), default=False)
number = db.Column(db.Integer, default=None)
|
[
"nesi.devices.softbox.api.db.Column",
"nesi.devices.softbox.api.db.Boolean",
"nesi.devices.softbox.api.db.String",
"nesi.devices.softbox.api.db.ForeignKey",
"nesi.devices.softbox.api.db.Integer",
"nesi.devices.softbox.api.db.Enum"
] |
[((5442, 5477), 'nesi.devices.softbox.api.db.Column', 'db.Column', (['db.Integer'], {'default': 'None'}), '(db.Integer, default=None)\n', (5451, 5477), False, 'from nesi.devices.softbox.api import db\n'), ((481, 493), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (491, 493), False, 'from nesi.devices.softbox.api import db\n'), ((534, 547), 'nesi.devices.softbox.api.db.String', 'db.String', (['(64)'], {}), '(64)\n', (543, 547), False, 'from nesi.devices.softbox.api import db\n'), ((577, 588), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (586, 588), False, 'from nesi.devices.softbox.api import db\n'), ((625, 648), 'nesi.devices.softbox.api.db.ForeignKey', 'db.ForeignKey', (['"""box.id"""'], {}), "('box.id')\n", (638, 648), False, 'from nesi.devices.softbox.api import db\n'), ((671, 834), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""service"""', '"""spectrum"""', '"""dpbo"""', '"""rtx"""', '"""vect"""', '"""sos"""', '"""ghs"""', '"""qos"""', '"""policer"""', '"""vce"""', '"""data-rate"""', '"""noise-margin"""', '"""inp-delay"""', '"""mode-specific-psd"""'], {}), "('service', 'spectrum', 'dpbo', 'rtx', 'vect', 'sos', 'ghs', 'qos',\n 'policer', 'vce', 'data-rate', 'noise-margin', 'inp-delay',\n 'mode-specific-psd')\n", (678, 834), False, 'from nesi.devices.softbox.api import db\n'), ((904, 915), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (913, 915), False, 'from nesi.devices.softbox.api import db\n'), ((975, 986), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (984, 986), False, 'from nesi.devices.softbox.api import db\n'), ((1053, 1065), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1063, 1065), False, 'from nesi.devices.softbox.api import db\n'), ((1131, 1143), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1141, 1143), False, 'from nesi.devices.softbox.api import db\n'), ((1206, 1224), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""generic"""'], {}), "('generic')\n", (1213, 1224), False, 'from nesi.devices.softbox.api import db\n'), ((1304, 1316), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1314, 1316), False, 'from nesi.devices.softbox.api import db\n'), ((1358, 1370), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1368, 1370), False, 'from nesi.devices.softbox.api import db\n'), ((1407, 1418), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (1416, 1418), False, 'from nesi.devices.softbox.api import db\n'), ((1458, 1470), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1468, 1470), False, 'from nesi.devices.softbox.api import db\n'), ((1510, 1522), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1520, 1522), False, 'from nesi.devices.softbox.api import db\n'), ((1562, 1574), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1572, 1574), False, 'from nesi.devices.softbox.api import db\n'), ((1619, 1631), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1629, 1631), False, 'from nesi.devices.softbox.api import db\n'), ((1687, 1698), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (1696, 1698), False, 'from nesi.devices.softbox.api import db\n'), ((1751, 1762), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (1760, 1762), False, 'from nesi.devices.softbox.api import db\n'), ((1812, 1823), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (1821, 1823), False, 'from nesi.devices.softbox.api import db\n'), ((1873, 1885), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (1883, 1885), False, 'from nesi.devices.softbox.api import db\n'), ((1932, 1943), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (1941, 1943), False, 'from nesi.devices.softbox.api import db\n'), ((1992, 2004), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2002, 2004), False, 'from nesi.devices.softbox.api import db\n'), ((2047, 2058), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2056, 2058), False, 'from nesi.devices.softbox.api import db\n'), ((2101, 2112), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2110, 2112), False, 'from nesi.devices.softbox.api import db\n'), ((2153, 2164), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2162, 2164), False, 'from nesi.devices.softbox.api import db\n'), ((2205, 2216), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2214, 2216), False, 'from nesi.devices.softbox.api import db\n'), ((2257, 2268), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2266, 2268), False, 'from nesi.devices.softbox.api import db\n'), ((2318, 2329), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (2327, 2329), False, 'from nesi.devices.softbox.api import db\n'), ((2377, 2389), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2387, 2389), False, 'from nesi.devices.softbox.api import db\n'), ((2436, 2448), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2446, 2448), False, 'from nesi.devices.softbox.api import db\n'), ((2504, 2516), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2514, 2516), False, 'from nesi.devices.softbox.api import db\n'), ((2572, 2584), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2582, 2584), False, 'from nesi.devices.softbox.api import db\n'), ((2644, 2656), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2654, 2656), False, 'from nesi.devices.softbox.api import db\n'), ((2716, 2728), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2726, 2728), False, 'from nesi.devices.softbox.api import db\n'), ((2787, 2799), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2797, 2799), False, 'from nesi.devices.softbox.api import db\n'), ((2859, 2871), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2869, 2871), False, 'from nesi.devices.softbox.api import db\n'), ((2918, 2930), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2928, 2930), False, 'from nesi.devices.softbox.api import db\n'), ((2977, 2989), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (2987, 2989), False, 'from nesi.devices.softbox.api import db\n'), ((3029, 3041), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3039, 3041), False, 'from nesi.devices.softbox.api import db\n'), ((3081, 3093), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3091, 3093), False, 'from nesi.devices.softbox.api import db\n'), ((3133, 3145), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3143, 3145), False, 'from nesi.devices.softbox.api import db\n'), ((3207, 3219), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3217, 3219), False, 'from nesi.devices.softbox.api import db\n'), ((3277, 3289), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3287, 3289), False, 'from nesi.devices.softbox.api import db\n'), ((3333, 3345), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3343, 3345), False, 'from nesi.devices.softbox.api import db\n'), ((3389, 3401), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3399, 3401), False, 'from nesi.devices.softbox.api import db\n'), ((3454, 3466), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3464, 3466), False, 'from nesi.devices.softbox.api import db\n'), ((3519, 3531), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3529, 3531), False, 'from nesi.devices.softbox.api import db\n'), ((3589, 3601), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3599, 3601), False, 'from nesi.devices.softbox.api import db\n'), ((3663, 3675), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3673, 3675), False, 'from nesi.devices.softbox.api import db\n'), ((3723, 3734), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (3732, 3734), False, 'from nesi.devices.softbox.api import db\n'), ((3788, 3800), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3798, 3800), False, 'from nesi.devices.softbox.api import db\n'), ((3845, 3857), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3855, 3857), False, 'from nesi.devices.softbox.api import db\n'), ((3902, 3914), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3912, 3914), False, 'from nesi.devices.softbox.api import db\n'), ((3962, 3974), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (3972, 3974), False, 'from nesi.devices.softbox.api import db\n'), ((4020, 4031), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4029, 4031), False, 'from nesi.devices.softbox.api import db\n'), ((4073, 4084), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4082, 4084), False, 'from nesi.devices.softbox.api import db\n'), ((4123, 4134), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4132, 4134), False, 'from nesi.devices.softbox.api import db\n'), ((4174, 4185), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4183, 4185), False, 'from nesi.devices.softbox.api import db\n'), ((4225, 4236), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4234, 4236), False, 'from nesi.devices.softbox.api import db\n'), ((4276, 4287), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4285, 4287), False, 'from nesi.devices.softbox.api import db\n'), ((4327, 4338), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4336, 4338), False, 'from nesi.devices.softbox.api import db\n'), ((4378, 4389), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4387, 4389), False, 'from nesi.devices.softbox.api import db\n'), ((4436, 4447), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4445, 4447), False, 'from nesi.devices.softbox.api import db\n'), ((4484, 4495), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4493, 4495), False, 'from nesi.devices.softbox.api import db\n'), ((4540, 4552), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (4550, 4552), False, 'from nesi.devices.softbox.api import db\n'), ((4601, 4612), 'nesi.devices.softbox.api.db.String', 'db.String', ([], {}), '()\n', (4610, 4612), False, 'from nesi.devices.softbox.api import db\n'), ((4656, 4668), 'nesi.devices.softbox.api.db.Integer', 'db.Integer', ([], {}), '()\n', (4666, 4668), False, 'from nesi.devices.softbox.api import db\n'), ((4711, 4739), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""enable"""', '"""disable"""'], {}), "('enable', 'disable')\n", (4718, 4739), False, 'from nesi.devices.softbox.api import db\n'), ((4782, 4810), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""enable"""', '"""disable"""'], {}), "('enable', 'disable')\n", (4789, 4810), False, 'from nesi.devices.softbox.api import db\n'), ((4853, 4881), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""enable"""', '"""disable"""'], {}), "('enable', 'disable')\n", (4860, 4881), False, 'from nesi.devices.softbox.api import db\n'), ((4922, 4954), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""forwarding"""', '"""discard"""'], {}), "('forwarding', 'discard')\n", (4929, 4954), False, 'from nesi.devices.softbox.api import db\n'), ((5010, 5039), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""forward"""', '"""discard"""'], {}), "('forward', 'discard')\n", (5017, 5039), False, 'from nesi.devices.softbox.api import db\n'), ((5093, 5122), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""forward"""', '"""discard"""'], {}), "('forward', 'discard')\n", (5100, 5122), False, 'from nesi.devices.softbox.api import db\n'), ((5179, 5207), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""enable"""', '"""disable"""'], {}), "('enable', 'disable')\n", (5186, 5207), False, 'from nesi.devices.softbox.api import db\n'), ((5265, 5293), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""enable"""', '"""disable"""'], {}), "('enable', 'disable')\n", (5272, 5293), False, 'from nesi.devices.softbox.api import db\n'), ((5339, 5361), 'nesi.devices.softbox.api.db.Enum', 'db.Enum', (['"""transparent"""'], {}), "('transparent')\n", (5346, 5361), False, 'from nesi.devices.softbox.api import db\n'), ((5400, 5412), 'nesi.devices.softbox.api.db.Boolean', 'db.Boolean', ([], {}), '()\n', (5410, 5412), False, 'from nesi.devices.softbox.api import db\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **
#
# ================== #
# CONNECTION_HANDLER #
# ================== #
# Handler class for controlling the connection to the robot
#
# @author ES
# **
import logging
import threading
from autobahn.twisted.component import Component, run
from twisted.internet.defer import inlineCallbacks
import es_common.utils.config_helper as config_helper
from es_common.model.observable import Observable
class ConnectionHandler(object):
def __init__(self):
self.logger = logging.getLogger("Connection Handler")
self.rie = None
self.session_observers = Observable()
self.session = None
@inlineCallbacks
def on_connect(self, session, details=None):
self.logger.debug("Created session: {}".format(session))
self.session = session
yield self.session_observers.notify_all(session)
def start_rie_session(self, robot_name=None, robot_realm=None):
try:
if robot_realm is None:
# get the realm from config
name_key = "pepper" if robot_name is None else robot_name.lower()
robot_realm = config_helper.get_robot_settings()["realm"][name_key]
self.logger.info("{} REALM: {}".format(robot_name, robot_realm))
self.rie = Component(
transports=[{
'url': u"wss://wamp.robotsindeklas.nl",
'serializers': ['msgpack'],
'max_retries': 0
}],
realm=robot_realm
)
self.logger.info("** {}".format(threading.current_thread().name))
self.rie.on_join(self.on_connect)
self.logger.info("Running the rie component")
run([self.rie])
except Exception as e:
self.logger.error("Unable to run the rie component | {}".format(e))
def stop_session(self):
try:
if self.session:
self.session.leave()
self.session_observers.notify_all(None)
self.logger.info("Closed the robot session.")
else:
self.logger.info("There is no active session.")
except Exception as e:
self.logger.error("Error while closing rie session: {}".format(e))
|
[
"autobahn.twisted.component.Component",
"es_common.model.observable.Observable",
"logging.getLogger",
"es_common.utils.config_helper.get_robot_settings",
"threading.current_thread",
"autobahn.twisted.component.run"
] |
[((527, 566), 'logging.getLogger', 'logging.getLogger', (['"""Connection Handler"""'], {}), "('Connection Handler')\n", (544, 566), False, 'import logging\n'), ((624, 636), 'es_common.model.observable.Observable', 'Observable', ([], {}), '()\n', (634, 636), False, 'from es_common.model.observable import Observable\n'), ((1318, 1451), 'autobahn.twisted.component.Component', 'Component', ([], {'transports': "[{'url': u'wss://wamp.robotsindeklas.nl', 'serializers': ['msgpack'],\n 'max_retries': 0}]", 'realm': 'robot_realm'}), "(transports=[{'url': u'wss://wamp.robotsindeklas.nl',\n 'serializers': ['msgpack'], 'max_retries': 0}], realm=robot_realm)\n", (1327, 1451), False, 'from autobahn.twisted.component import Component, run\n'), ((1768, 1783), 'autobahn.twisted.component.run', 'run', (['[self.rie]'], {}), '([self.rie])\n', (1771, 1783), False, 'from autobahn.twisted.component import Component, run\n'), ((1163, 1197), 'es_common.utils.config_helper.get_robot_settings', 'config_helper.get_robot_settings', ([], {}), '()\n', (1195, 1197), True, 'import es_common.utils.config_helper as config_helper\n'), ((1617, 1643), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (1641, 1643), False, 'import threading\n')]
|
"""
Módulo da aplicação usando Streamlit para gerar a estrutura front-end
"""
# FIXME: Por algum motivo o streamlit não aceita importar as páginas
# via __init__.py ou importação relativa
# pylint: disable=import-error
import streamlit as st
from introducao import intro
from questao_problema import case
from analise_geografica import geografica
from analise_prazos_x_atrasos import prazos_atrasos
from report import report
from solucoes import solucoes
from consideracoes_finais import consideracoes_finais
# pylint: enable=import-error
PAGES = {
"Introdução": intro,
"Questão Problema": case,
"Análise Geográfica das Vendas e Compras": geografica,
"Análise dos Atrasos dos Pedidos": prazos_atrasos,
"Pandas Profiling": report,
"Relatório Final e Soluções Propostas": solucoes,
"Considerações": consideracoes_finais,
}
st.sidebar.title("Índice")
selection = st.sidebar.radio("", list(PAGES.keys()))
page = PAGES[selection]
page()
|
[
"streamlit.sidebar.title"
] |
[((852, 878), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Índice"""'], {}), "('Índice')\n", (868, 878), True, 'import streamlit as st\n')]
|
# Generated by Django 3.2.4 on 2021-06-18 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Messages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=240, verbose_name='Name')),
('message', models.CharField(max_length=240, verbose_name='Name')),
],
),
migrations.RemoveField(
model_name='customer',
name='created',
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.AutoField"
] |
[((613, 674), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""customer"""', 'name': '"""created"""'}), "(model_name='customer', name='created')\n", (635, 674), False, 'from django.db import migrations, models\n'), ((322, 415), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (338, 415), False, 'from django.db import migrations, models\n'), ((439, 492), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(240)', 'verbose_name': '"""Name"""'}), "(max_length=240, verbose_name='Name')\n", (455, 492), False, 'from django.db import migrations, models\n'), ((523, 576), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(240)', 'verbose_name': '"""Name"""'}), "(max_length=240, verbose_name='Name')\n", (539, 576), False, 'from django.db import migrations, models\n')]
|
import logging
from uuid import UUID
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from starlette import status
from starlette.requests import Request
from api.core.config import settings
from api.endpoints.dependencies.db import get_db
from api.endpoints.dependencies.tenant_security import get_from_context
from api.endpoints.routes.v1.link_utils import build_list_links
from api.services.v1 import governance_service
from api.endpoints.models.v1.governance import (
SchemaTemplateListResponse,
SchemaTemplateListParameters,
CreateSchemaTemplatePayload,
CreateSchemaTemplateResponse,
ImportSchemaTemplatePayload,
ImportSchemaTemplateResponse,
TemplateStatusType,
)
from api.tasks import SendCredDefRequestTask, SendSchemaRequestTask
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get(
"/", status_code=status.HTTP_200_OK, response_model=SchemaTemplateListResponse
)
async def list_schema_templates(
request: Request,
page_num: int | None = 1,
page_size: int | None = settings.DEFAULT_PAGE_SIZE,
name: str | None = None,
schema_id: str | None = None,
schema_template_id: UUID | None = None,
status: TemplateStatusType | None = None,
tags: str | None = None,
deleted: bool | None = False,
db: AsyncSession = Depends(get_db),
) -> SchemaTemplateListResponse:
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
parameters = SchemaTemplateListParameters(
url=str(request.url),
page_num=page_num,
page_size=page_size,
name=name,
deleted=deleted,
schema_id=schema_id,
schema_template_id=schema_template_id,
status=status,
tags=tags,
)
items, total_count = await governance_service.list_schema_templates(
db, tenant_id, wallet_id, parameters
)
links = build_list_links(total_count, parameters)
return SchemaTemplateListResponse(
items=items, count=len(items), total=total_count, links=links
)
@router.post("/", status_code=status.HTTP_200_OK)
async def create_schema_template(
payload: CreateSchemaTemplatePayload,
db: AsyncSession = Depends(get_db),
) -> CreateSchemaTemplateResponse:
"""
Create a new schema and/or credential definition.
"schema_definition", defines the new schema.
If "credential_definition" is provided, create a credential definition.
"""
logger.info("> create_schema_template()")
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
logger.debug(f"wallet_id = {wallet_id}")
logger.debug(f"tenant_id = {tenant_id}")
item, c_t_item = await governance_service.create_schema_template(
db, tenant_id, wallet_id, payload=payload
)
links = [] # TODO
# this will kick off the call to the ledger and then event listeners will finish
# populating the schema (and cred def) data.
logger.debug("> > SendSchemaRequestTask.assign()")
await SendSchemaRequestTask.assign(
tenant_id, wallet_id, payload.schema_definition, item.schema_template_id
)
logger.debug("< < SendSchemaRequestTask.assign()")
logger.debug(f"item = {item}")
logger.debug(f"credential_template = {c_t_item}")
logger.info("< create_schema_template()")
return CreateSchemaTemplateResponse(
item=item, credential_template=c_t_item, links=links
)
@router.post("/import", status_code=status.HTTP_200_OK)
async def import_schema_template(
payload: ImportSchemaTemplatePayload,
db: AsyncSession = Depends(get_db),
) -> ImportSchemaTemplateResponse:
"""
Import an existing public schema and optionally create a credential definition.
"schema_id" is the ledger's schema id.
If "credential_definition" is provided, create a credential definition.
"""
logger.info("> import_schema_template()")
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
logger.debug(f"wallet_id = {wallet_id}")
logger.debug(f"tenant_id = {tenant_id}")
item, c_t_item = await governance_service.import_schema_template(
db, tenant_id, wallet_id, payload=payload
)
links = [] # TODO
# this will kick off the call to the ledger and then event listeners will finish
# populating the cred def
if c_t_item:
logger.debug("> > SendCredDefRequestTask.assign()")
await SendCredDefRequestTask.assign(
tenant_id, wallet_id, c_t_item.credential_template_id
)
logger.debug("< < SendCredDefRequestTask.assign()")
logger.debug(f"item = {item}")
logger.debug(f"credential_template = {c_t_item}")
logger.info("< import_schema_template()")
return ImportSchemaTemplateResponse(
item=item, credential_template=c_t_item, links=links
)
|
[
"api.endpoints.dependencies.tenant_security.get_from_context",
"api.endpoints.models.v1.governance.CreateSchemaTemplateResponse",
"api.endpoints.routes.v1.link_utils.build_list_links",
"api.endpoints.models.v1.governance.ImportSchemaTemplateResponse",
"api.services.v1.governance_service.import_schema_template",
"api.services.v1.governance_service.create_schema_template",
"api.tasks.SendSchemaRequestTask.assign",
"api.services.v1.governance_service.list_schema_templates",
"fastapi.Depends",
"api.tasks.SendCredDefRequestTask.assign",
"logging.getLogger",
"fastapi.APIRouter"
] |
[((818, 829), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (827, 829), False, 'from fastapi import APIRouter, Depends\n'), ((839, 866), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (856, 866), False, 'import logging\n'), ((1347, 1362), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (1354, 1362), False, 'from fastapi import APIRouter, Depends\n'), ((1413, 1449), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_WALLET_ID"""'], {}), "('TENANT_WALLET_ID')\n", (1429, 1449), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((1466, 1495), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_ID"""'], {}), "('TENANT_ID')\n", (1482, 1495), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((1935, 1976), 'api.endpoints.routes.v1.link_utils.build_list_links', 'build_list_links', (['total_count', 'parameters'], {}), '(total_count, parameters)\n', (1951, 1976), False, 'from api.endpoints.routes.v1.link_utils import build_list_links\n'), ((2244, 2259), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (2251, 2259), False, 'from fastapi import APIRouter, Depends\n'), ((2554, 2590), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_WALLET_ID"""'], {}), "('TENANT_WALLET_ID')\n", (2570, 2590), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((2607, 2636), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_ID"""'], {}), "('TENANT_ID')\n", (2623, 2636), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((3395, 3482), 'api.endpoints.models.v1.governance.CreateSchemaTemplateResponse', 'CreateSchemaTemplateResponse', ([], {'item': 'item', 'credential_template': 'c_t_item', 'links': 'links'}), '(item=item, credential_template=c_t_item, links\n =links)\n', (3423, 3482), False, 'from api.endpoints.models.v1.governance import SchemaTemplateListResponse, SchemaTemplateListParameters, CreateSchemaTemplatePayload, CreateSchemaTemplateResponse, ImportSchemaTemplatePayload, ImportSchemaTemplateResponse, TemplateStatusType\n'), ((3649, 3664), 'fastapi.Depends', 'Depends', (['get_db'], {}), '(get_db)\n', (3656, 3664), False, 'from fastapi import APIRouter, Depends\n'), ((3983, 4019), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_WALLET_ID"""'], {}), "('TENANT_WALLET_ID')\n", (3999, 4019), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((4036, 4065), 'api.endpoints.dependencies.tenant_security.get_from_context', 'get_from_context', (['"""TENANT_ID"""'], {}), "('TENANT_ID')\n", (4052, 4065), False, 'from api.endpoints.dependencies.tenant_security import get_from_context\n'), ((4827, 4914), 'api.endpoints.models.v1.governance.ImportSchemaTemplateResponse', 'ImportSchemaTemplateResponse', ([], {'item': 'item', 'credential_template': 'c_t_item', 'links': 'links'}), '(item=item, credential_template=c_t_item, links\n =links)\n', (4855, 4914), False, 'from api.endpoints.models.v1.governance import SchemaTemplateListResponse, SchemaTemplateListParameters, CreateSchemaTemplatePayload, CreateSchemaTemplateResponse, ImportSchemaTemplatePayload, ImportSchemaTemplateResponse, TemplateStatusType\n'), ((1829, 1907), 'api.services.v1.governance_service.list_schema_templates', 'governance_service.list_schema_templates', (['db', 'tenant_id', 'wallet_id', 'parameters'], {}), '(db, tenant_id, wallet_id, parameters)\n', (1869, 1907), False, 'from api.services.v1 import governance_service\n'), ((2755, 2844), 'api.services.v1.governance_service.create_schema_template', 'governance_service.create_schema_template', (['db', 'tenant_id', 'wallet_id'], {'payload': 'payload'}), '(db, tenant_id, wallet_id, payload\n =payload)\n', (2796, 2844), False, 'from api.services.v1 import governance_service\n'), ((3077, 3184), 'api.tasks.SendSchemaRequestTask.assign', 'SendSchemaRequestTask.assign', (['tenant_id', 'wallet_id', 'payload.schema_definition', 'item.schema_template_id'], {}), '(tenant_id, wallet_id, payload.\n schema_definition, item.schema_template_id)\n', (3105, 3184), False, 'from api.tasks import SendCredDefRequestTask, SendSchemaRequestTask\n'), ((4184, 4273), 'api.services.v1.governance_service.import_schema_template', 'governance_service.import_schema_template', (['db', 'tenant_id', 'wallet_id'], {'payload': 'payload'}), '(db, tenant_id, wallet_id, payload\n =payload)\n', (4225, 4273), False, 'from api.services.v1 import governance_service\n'), ((4513, 4602), 'api.tasks.SendCredDefRequestTask.assign', 'SendCredDefRequestTask.assign', (['tenant_id', 'wallet_id', 'c_t_item.credential_template_id'], {}), '(tenant_id, wallet_id, c_t_item.\n credential_template_id)\n', (4542, 4602), False, 'from api.tasks import SendCredDefRequestTask, SendSchemaRequestTask\n')]
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import sys
import simplejson as json
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from pgadmin.utils import server_utils as server_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
if sys.version_info < (3, 3):
from mock import patch, MagicMock
else:
from unittest.mock import patch, MagicMock
class RestoreCreateJobTest(BaseTestGenerator):
"""Test the RestoreCreateJob class"""
scenarios = [
('When restore object with default options',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
custom=False,
verbose=True,
blobs=True,
schemas=[],
tables=[],
database='postgres'
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with format directory',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='directory',
custom=False,
verbose=True,
blobs=False,
schemas=[],
tables=[],
database='postgres'
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--format=d'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with the sections options',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
no_of_jobs='2',
custom=False,
verbose=True,
schemas=[],
tables=[],
database='postgres',
data=True,
pre_data=True,
post_data=True,
only_data=True,
only_schema=True
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--jobs', '2',
'--section=pre-data', '--section=data',
'--section=post-data'],
not_expected_cmd_opts=[],
# Below options should be enabled once we fix the issue #3368
# not_expected_cmd_opts=['--data-only', '--schema-only'],
expected_exit_code=[0, None],
)),
('When restore the object with Type of objects',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
no_of_jobs='2',
custom=False,
verbose=True,
schemas=[],
tables=[],
database='postgres',
only_data=True,
only_schema=True,
dns_owner=True
),
url='/restore/job/{0}',
expected_cmd_opts=['--verbose', '--data-only'],
not_expected_cmd_opts=[],
# Below options should be enabled once we fix the issue #3368
# not_expected_cmd_opts=['--schema-only', '--no-owner'],
expected_exit_code=[0, None],
)),
('When restore object with option - Do not save',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
verbose=True,
custom=False,
schemas=[],
tables=[],
database='postgres',
dns_owner=True,
dns_privilege=True,
dns_tablespace=True,
only_data=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--no-owner',
'--no-tablespaces',
'--no-privileges'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with option - Do not save comments',
dict(
class_params=dict(
sid=1,
name='test_restore_server',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_restore_file',
format='custom',
verbose=True,
custom=False,
schemas=[],
tables=[],
database='postgres',
no_comments=True,
only_data=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--no-comments'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None],
server_min_version=110000,
message='Restore object with --no-comments are not supported '
'by EPAS/PG server less than 11.0'
)),
('When restore object with option - Queries',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
clean=True,
include_create_database=True,
single_transaction=True,
),
url='/restore/job/{0}',
expected_cmd_opts=['--create', '--clean',
'--single-transaction'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with option - Disbale',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
disable_trigger=True,
no_data_fail_table=True,
only_schema=False
),
url='/restore/job/{0}',
expected_cmd_opts=['--disable-triggers',
'--no-data-for-failed-tables'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
('When restore object with option - Miscellaneous',
dict(
class_params=dict(
sid=1,
name='test_restore_file',
port=5444,
host='localhost',
database='postgres',
bfile='test_restore',
username='postgres'
),
params=dict(
file='test_backup_file',
format='custom',
verbose=True,
schemas=[],
tables=[],
database='postgres',
use_set_session_auth=True,
exit_on_error=True,
),
url='/restore/job/{0}',
# Add '--use_set_session_auth' into
# expected_cmd_opts once #3363 fixed
expected_cmd_opts=['--exit-on-error'],
not_expected_cmd_opts=[],
expected_exit_code=[0, None]
)),
]
def setUp(self):
if self.server['default_binary_paths'] is None:
self.skipTest(
"default_binary_paths is not set for the server {0}".format(
self.server['name']
)
)
@patch('pgadmin.tools.restore.Server')
@patch('pgadmin.tools.restore.current_user')
@patch('pgadmin.tools.restore.RestoreMessage')
@patch('pgadmin.tools.restore.filename_with_file_manager_path')
@patch('pgadmin.tools.restore.BatchProcess')
@patch('pgadmin.utils.driver.psycopg2.server_manager.ServerManager.'
'export_password_env')
def runTest(self, export_password_env_mock, batch_process_mock,
filename_mock, restore_message_mock,
current_user_mock, server_mock):
class TestMockServer():
def __init__(self, name, host, port, id, username):
self.name = name
self.host = host
self.port = port
self.id = id
self.username = username
self.db_name = ''
self.server_id = parent_node_dict["server"][-1]["server_id"]
mock_obj = TestMockServer(self.class_params['name'],
self.class_params['host'],
self.class_params['port'],
self.server_id,
self.class_params['username']
)
mock_result = server_mock.query.filter_by.return_value
mock_result.first.return_value = mock_obj
filename_mock.return_value = self.params['file']
batch_process_mock.set_env_variables = MagicMock(
return_value=True
)
batch_process_mock.start = MagicMock(
return_value=True
)
export_password_env_mock.return_value = True
server_response = server_utils.connect_server(self, self.server_id)
if server_response["info"] == "Server connected.":
db_owner = server_response['data']['user']['name']
self.data = database_utils.get_db_data(db_owner)
self.db_name = self.data['name']
if hasattr(self, 'server_min_version') and \
server_response["data"]["version"] < \
self.server_min_version:
self.skipTest(self.message)
url = self.url.format(self.server_id)
# Create the restore job
response = self.tester.post(url,
data=json.dumps(self.params),
content_type='html/json')
self.assertEqual(response.status_code, 200)
self.assertTrue(restore_message_mock.called)
self.assertTrue(batch_process_mock.called)
if self.expected_cmd_opts:
for opt in self.expected_cmd_opts:
self.assertIn(
opt,
batch_process_mock.call_args_list[0][1]['args']
)
if self.not_expected_cmd_opts:
for opt in self.not_expected_cmd_opts:
self.assertNotIn(
opt,
batch_process_mock.call_args_list[0][1]['args']
)
|
[
"pgadmin.browser.server_groups.servers.databases.tests.utils.get_db_data",
"unittest.mock.MagicMock",
"simplejson.dumps",
"unittest.mock.patch",
"pgadmin.utils.server_utils.connect_server"
] |
[((10248, 10285), 'unittest.mock.patch', 'patch', (['"""pgadmin.tools.restore.Server"""'], {}), "('pgadmin.tools.restore.Server')\n", (10253, 10285), False, 'from unittest.mock import patch, MagicMock\n'), ((10291, 10334), 'unittest.mock.patch', 'patch', (['"""pgadmin.tools.restore.current_user"""'], {}), "('pgadmin.tools.restore.current_user')\n", (10296, 10334), False, 'from unittest.mock import patch, MagicMock\n'), ((10340, 10385), 'unittest.mock.patch', 'patch', (['"""pgadmin.tools.restore.RestoreMessage"""'], {}), "('pgadmin.tools.restore.RestoreMessage')\n", (10345, 10385), False, 'from unittest.mock import patch, MagicMock\n'), ((10391, 10453), 'unittest.mock.patch', 'patch', (['"""pgadmin.tools.restore.filename_with_file_manager_path"""'], {}), "('pgadmin.tools.restore.filename_with_file_manager_path')\n", (10396, 10453), False, 'from unittest.mock import patch, MagicMock\n'), ((10459, 10502), 'unittest.mock.patch', 'patch', (['"""pgadmin.tools.restore.BatchProcess"""'], {}), "('pgadmin.tools.restore.BatchProcess')\n", (10464, 10502), False, 'from unittest.mock import patch, MagicMock\n'), ((10508, 10605), 'unittest.mock.patch', 'patch', (['"""pgadmin.utils.driver.psycopg2.server_manager.ServerManager.export_password_env"""'], {}), "(\n 'pgadmin.utils.driver.psycopg2.server_manager.ServerManager.export_password_env'\n )\n", (10513, 10605), False, 'from unittest.mock import patch, MagicMock\n'), ((11694, 11722), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (11703, 11722), False, 'from unittest.mock import patch, MagicMock\n'), ((11780, 11808), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (11789, 11808), False, 'from unittest.mock import patch, MagicMock\n'), ((11912, 11961), 'pgadmin.utils.server_utils.connect_server', 'server_utils.connect_server', (['self', 'self.server_id'], {}), '(self, self.server_id)\n', (11939, 11961), True, 'from pgadmin.utils import server_utils as server_utils\n'), ((12108, 12144), 'pgadmin.browser.server_groups.servers.databases.tests.utils.get_db_data', 'database_utils.get_db_data', (['db_owner'], {}), '(db_owner)\n', (12134, 12144), True, 'from pgadmin.browser.server_groups.servers.databases.tests import utils as database_utils\n'), ((12559, 12582), 'simplejson.dumps', 'json.dumps', (['self.params'], {}), '(self.params)\n', (12569, 12582), True, 'import simplejson as json\n')]
|
"""
A set of utilities for setting up property estimation workflows.
"""
from dataclasses import astuple, dataclass
from typing import Generic, Optional, Tuple, TypeVar
from openff.evaluator import unit
from openff.evaluator.attributes import PlaceholderValue
from openff.evaluator.datasets import PropertyPhase
from openff.evaluator.protocols import (
analysis,
coordinates,
forcefield,
gradients,
groups,
miscellaneous,
openmm,
reweighting,
storage,
)
from openff.evaluator.protocols.groups import ConditionalGroup
from openff.evaluator.storage.data import StoredSimulationData
from openff.evaluator.thermodynamics import Ensemble
from openff.evaluator.utils.observables import ObservableType
from openff.evaluator.workflow import ProtocolGroup
from openff.evaluator.workflow.schemas import ProtocolReplicator
from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue
S = TypeVar("S", bound=analysis.BaseAverageObservable)
T = TypeVar("T", bound=reweighting.BaseMBARProtocol)
@dataclass
class SimulationProtocols(Generic[S]):
"""The common set of protocols which would be required to estimate an observable
by running a new molecule simulation."""
build_coordinates: coordinates.BuildCoordinatesPackmol
assign_parameters: forcefield.BaseBuildSystem
energy_minimisation: openmm.OpenMMEnergyMinimisation
equilibration_simulation: openmm.OpenMMSimulation
production_simulation: openmm.OpenMMSimulation
analysis_protocol: S
converge_uncertainty: ProtocolGroup
decorrelate_trajectory: analysis.DecorrelateTrajectory
decorrelate_observables: analysis.DecorrelateObservables
def __iter__(self):
yield from astuple(self)
@dataclass
class ReweightingProtocols(Generic[S, T]):
"""The common set of protocols which would be required to re-weight an observable
from cached simulation data."""
unpack_stored_data: storage.UnpackStoredSimulationData
join_trajectories: reweighting.ConcatenateTrajectories
join_observables: reweighting.ConcatenateObservables
build_reference_system: forcefield.BaseBuildSystem
evaluate_reference_potential: reweighting.BaseEvaluateEnergies
build_target_system: forcefield.BaseBuildSystem
evaluate_target_potential: reweighting.BaseEvaluateEnergies
statistical_inefficiency: S
replicate_statistics: miscellaneous.DummyProtocol
decorrelate_reference_potential: analysis.DecorrelateObservables
decorrelate_target_potential: analysis.DecorrelateObservables
decorrelate_observable: analysis.DecorrelateObservables
zero_gradients: Optional[gradients.ZeroGradients]
reweight_observable: T
def __iter__(self):
yield from astuple(self)
def generate_base_reweighting_protocols(
statistical_inefficiency: S,
reweight_observable: T,
replicator_id: str = "data_replicator",
id_suffix: str = "",
) -> Tuple[ReweightingProtocols[S, T], ProtocolReplicator]:
"""Constructs a set of protocols which, when combined in a workflow schema, may be
executed to reweight a set of cached simulation data to estimate the average
value of an observable.
Parameters
----------
statistical_inefficiency
The protocol which will be used to compute the statistical inefficiency and
equilibration time of the observable of interest. This information will be
used to decorrelate the cached data prior to reweighting.
reweight_observable
The MBAR reweighting protocol to use to reweight the observable to the target
state. This method will automatically set the reduced potentials on the
object.
replicator_id: str
The id to use for the cached data replicator.
id_suffix: str
A string suffix to append to each of the protocol ids.
Returns
-------
The protocols to add to the workflow, a reference to the average value of the
estimated observable (an ``Observable`` object), and the replicator which will
clone the workflow for each piece of cached simulation data.
"""
# Create the replicator which will apply these protocol once for each piece of
# cached simulation data.
data_replicator = ProtocolReplicator(replicator_id=replicator_id)
data_replicator.template_values = ProtocolPath("full_system_data", "global")
# Validate the inputs.
assert isinstance(statistical_inefficiency, analysis.BaseAverageObservable)
assert data_replicator.placeholder_id in statistical_inefficiency.id
assert data_replicator.placeholder_id not in reweight_observable.id
replicator_suffix = f"_{data_replicator.placeholder_id}{id_suffix}"
# Unpack all the of the stored data.
unpack_stored_data = storage.UnpackStoredSimulationData(
"unpack_data{}".format(replicator_suffix)
)
unpack_stored_data.simulation_data_path = ReplicatorValue(replicator_id)
# Join the individual trajectories together.
join_trajectories = reweighting.ConcatenateTrajectories(
f"join_trajectories{id_suffix}"
)
join_trajectories.input_coordinate_paths = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
join_trajectories.input_trajectory_paths = ProtocolPath(
"trajectory_file_path", unpack_stored_data.id
)
join_observables = reweighting.ConcatenateObservables(
f"join_observables{id_suffix}"
)
join_observables.input_observables = ProtocolPath(
"observables", unpack_stored_data.id
)
# Calculate the reduced potentials for each of the reference states.
build_reference_system = forcefield.BaseBuildSystem(
f"build_system{replicator_suffix}"
)
build_reference_system.force_field_path = ProtocolPath(
"force_field_path", unpack_stored_data.id
)
build_reference_system.coordinate_file_path = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
build_reference_system.substance = ProtocolPath("substance", unpack_stored_data.id)
reduced_reference_potential = openmm.OpenMMEvaluateEnergies(
f"reduced_potential{replicator_suffix}"
)
reduced_reference_potential.parameterized_system = ProtocolPath(
"parameterized_system", build_reference_system.id
)
reduced_reference_potential.thermodynamic_state = ProtocolPath(
"thermodynamic_state", unpack_stored_data.id
)
reduced_reference_potential.coordinate_file_path = ProtocolPath(
"coordinate_file_path", unpack_stored_data.id
)
reduced_reference_potential.trajectory_file_path = ProtocolPath(
"output_trajectory_path", join_trajectories.id
)
# Calculate the reduced potential of the target state.
build_target_system = forcefield.BaseBuildSystem(f"build_system_target{id_suffix}")
build_target_system.force_field_path = ProtocolPath("force_field_path", "global")
build_target_system.substance = ProtocolPath("substance", "global")
build_target_system.coordinate_file_path = ProtocolPath(
"output_coordinate_path", join_trajectories.id
)
reduced_target_potential = openmm.OpenMMEvaluateEnergies(
f"reduced_potential_target{id_suffix}"
)
reduced_target_potential.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
reduced_target_potential.parameterized_system = ProtocolPath(
"parameterized_system", build_target_system.id
)
reduced_target_potential.coordinate_file_path = ProtocolPath(
"output_coordinate_path", join_trajectories.id
)
reduced_target_potential.trajectory_file_path = ProtocolPath(
"output_trajectory_path", join_trajectories.id
)
reduced_target_potential.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Compute the observable gradients.
zero_gradients = gradients.ZeroGradients(f"zero_gradients{id_suffix}")
zero_gradients.force_field_path = ProtocolPath("force_field_path", "global")
zero_gradients.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Decorrelate the target potentials and observables.
if not isinstance(statistical_inefficiency, analysis.BaseAverageObservable):
raise NotImplementedError()
decorrelate_target_potential = analysis.DecorrelateObservables(
f"decorrelate_target_potential{id_suffix}"
)
decorrelate_target_potential.time_series_statistics = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_target_potential.input_observables = ProtocolPath(
"output_observables", reduced_target_potential.id
)
decorrelate_observable = analysis.DecorrelateObservables(
f"decorrelate_observable{id_suffix}"
)
decorrelate_observable.time_series_statistics = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_observable.input_observables = ProtocolPath(
"output_observables", zero_gradients.id
)
# Decorrelate the reference potentials. Due to a quirk of how workflow replicators
# work the time series statistics need to be passed via a dummy protocol first.
#
# Because the `statistical_inefficiency` and `decorrelate_reference_potential`
# protocols are replicated by the same replicator the `time_series_statistics`
# input of `decorrelate_reference_potential_X` will take its value from
# the `time_series_statistics` output of `statistical_inefficiency_X` rather than
# as a list of of [statistical_inefficiency_0.time_series_statistics...
# statistical_inefficiency_N.time_series_statistics]. Passing the statistics via
# an un-replicated intermediate resolves this.
replicate_statistics = miscellaneous.DummyProtocol(
f"replicated_statistics{id_suffix}"
)
replicate_statistics.input_value = ProtocolPath(
"time_series_statistics", statistical_inefficiency.id
)
decorrelate_reference_potential = analysis.DecorrelateObservables(
f"decorrelate_reference_potential{replicator_suffix}"
)
decorrelate_reference_potential.time_series_statistics = ProtocolPath(
"output_value", replicate_statistics.id
)
decorrelate_reference_potential.input_observables = ProtocolPath(
"output_observables", reduced_reference_potential.id
)
# Finally, apply MBAR to get the reweighted value.
reweight_observable.reference_reduced_potentials = ProtocolPath(
"output_observables[ReducedPotential]", decorrelate_reference_potential.id
)
reweight_observable.target_reduced_potentials = ProtocolPath(
"output_observables[ReducedPotential]", decorrelate_target_potential.id
)
reweight_observable.observable = ProtocolPath(
"output_observables", decorrelate_observable.id
)
reweight_observable.frame_counts = ProtocolPath(
"time_series_statistics.n_uncorrelated_points", statistical_inefficiency.id
)
protocols = ReweightingProtocols(
unpack_stored_data,
#
join_trajectories,
join_observables,
#
build_reference_system,
reduced_reference_potential,
#
build_target_system,
reduced_target_potential,
#
statistical_inefficiency,
replicate_statistics,
#
decorrelate_reference_potential,
decorrelate_target_potential,
#
decorrelate_observable,
zero_gradients,
#
reweight_observable,
)
return protocols, data_replicator
def generate_reweighting_protocols(
observable_type: ObservableType,
replicator_id: str = "data_replicator",
id_suffix: str = "",
) -> Tuple[
ReweightingProtocols[analysis.AverageObservable, reweighting.ReweightObservable],
ProtocolReplicator,
]:
assert observable_type not in [
ObservableType.KineticEnergy,
ObservableType.TotalEnergy,
ObservableType.Enthalpy,
]
statistical_inefficiency = analysis.AverageObservable(
f"observable_inefficiency_$({replicator_id}){id_suffix}"
)
statistical_inefficiency.bootstrap_iterations = 1
reweight_observable = reweighting.ReweightObservable(
f"reweight_observable{id_suffix}"
)
protocols, data_replicator = generate_base_reweighting_protocols(
statistical_inefficiency, reweight_observable, replicator_id, id_suffix
)
protocols.statistical_inefficiency.observable = ProtocolPath(
f"observables[{observable_type.value}]", protocols.unpack_stored_data.id
)
if (
observable_type != ObservableType.PotentialEnergy
and observable_type != ObservableType.TotalEnergy
and observable_type != ObservableType.Enthalpy
and observable_type != ObservableType.ReducedPotential
):
protocols.zero_gradients.input_observables = ProtocolPath(
f"output_observables[{observable_type.value}]",
protocols.join_observables.id,
)
else:
protocols.zero_gradients = None
protocols.decorrelate_observable = protocols.decorrelate_target_potential
protocols.reweight_observable.observable = ProtocolPath(
f"output_observables[{observable_type.value}]",
protocols.decorrelate_observable.id,
)
return protocols, data_replicator
def generate_simulation_protocols(
analysis_protocol: S,
use_target_uncertainty: bool,
id_suffix: str = "",
conditional_group: Optional[ConditionalGroup] = None,
n_molecules: int = 1000,
) -> Tuple[SimulationProtocols[S], ProtocolPath, StoredSimulationData]:
"""Constructs a set of protocols which, when combined in a workflow schema, may be
executed to run a single simulation to estimate the average value of an observable.
The protocols returned will:
1) Build a set of liquid coordinates for the
property substance using packmol.
2) Assign a set of smirnoff force field parameters
to the system.
3) Perform an energy minimisation on the system.
4) Run a short NPT equilibration simulation for 100000 steps
using a timestep of 2fs.
5) Within a conditional group (up to a maximum of 100 times):
5a) Run a longer NPT production simulation for 1000000 steps using a
timestep of 2fs
5b) Extract the average value of an observable and it's uncertainty.
5c) If a convergence mode is set by the options, check if the target
uncertainty has been met. If not, repeat steps 5a), 5b) and 5c).
6) Extract uncorrelated configurations from a generated production
simulation.
7) Extract uncorrelated statistics from a generated production
simulation.
Parameters
----------
analysis_protocol
The protocol which will extract the observable of
interest from the generated simulation data.
use_target_uncertainty
Whether to run the simulation until the observable is
estimated to within the target uncertainty.
id_suffix: str
A string suffix to append to each of the protocol ids.
conditional_group: ProtocolGroup, optional
A custom group to wrap the main simulation / extraction
protocols within. It is up to the caller of this method to
manually add the convergence conditions to this group.
If `None`, a default group with uncertainty convergence
conditions is automatically constructed.
n_molecules: int
The number of molecules to use in the workflow.
Returns
-------
The protocols to add to the workflow, a reference to the average value of the
estimated observable (an ``Observable`` object), and an object which describes
the default data from a simulation to store, such as the uncorrelated statistics
and configurations.
"""
build_coordinates = coordinates.BuildCoordinatesPackmol(
f"build_coordinates{id_suffix}"
)
build_coordinates.substance = ProtocolPath("substance", "global")
build_coordinates.max_molecules = n_molecules
assign_parameters = forcefield.BaseBuildSystem(f"assign_parameters{id_suffix}")
assign_parameters.force_field_path = ProtocolPath("force_field_path", "global")
assign_parameters.coordinate_file_path = ProtocolPath(
"coordinate_file_path", build_coordinates.id
)
assign_parameters.substance = ProtocolPath("output_substance", build_coordinates.id)
# Equilibration
energy_minimisation = openmm.OpenMMEnergyMinimisation(
f"energy_minimisation{id_suffix}"
)
energy_minimisation.input_coordinate_file = ProtocolPath(
"coordinate_file_path", build_coordinates.id
)
energy_minimisation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
equilibration_simulation = openmm.OpenMMSimulation(
f"equilibration_simulation{id_suffix}"
)
equilibration_simulation.ensemble = Ensemble.NPT
equilibration_simulation.steps_per_iteration = 100000
equilibration_simulation.output_frequency = 5000
equilibration_simulation.timestep = 2.0 * unit.femtosecond
equilibration_simulation.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
equilibration_simulation.input_coordinate_file = ProtocolPath(
"output_coordinate_file", energy_minimisation.id
)
equilibration_simulation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
# Production
production_simulation = openmm.OpenMMSimulation(f"production_simulation{id_suffix}")
production_simulation.ensemble = Ensemble.NPT
production_simulation.steps_per_iteration = 1000000
production_simulation.output_frequency = 2000
production_simulation.timestep = 2.0 * unit.femtosecond
production_simulation.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
production_simulation.input_coordinate_file = ProtocolPath(
"output_coordinate_file", equilibration_simulation.id
)
production_simulation.parameterized_system = ProtocolPath(
"parameterized_system", assign_parameters.id
)
production_simulation.gradient_parameters = ProtocolPath(
"parameter_gradient_keys", "global"
)
# Set up a conditional group to ensure convergence of uncertainty
if conditional_group is None:
conditional_group = groups.ConditionalGroup(f"conditional_group{id_suffix}")
conditional_group.max_iterations = 100
if use_target_uncertainty:
condition = groups.ConditionalGroup.Condition()
condition.right_hand_value = ProtocolPath("target_uncertainty", "global")
condition.type = groups.ConditionalGroup.Condition.Type.LessThan
condition.left_hand_value = ProtocolPath(
"value.error", conditional_group.id, analysis_protocol.id
)
conditional_group.add_condition(condition)
# Make sure the simulation gets extended after each iteration.
production_simulation.total_number_of_iterations = ProtocolPath(
"current_iteration", conditional_group.id
)
conditional_group.add_protocols(production_simulation, analysis_protocol)
# Point the analyse protocol to the correct data sources
if not isinstance(analysis_protocol, analysis.BaseAverageObservable):
raise ValueError(
"The analysis protocol must inherit from either the "
"AverageTrajectoryObservable or BaseAverageObservable "
"protocols."
)
analysis_protocol.thermodynamic_state = ProtocolPath(
"thermodynamic_state", "global"
)
analysis_protocol.potential_energies = ProtocolPath(
f"observables[{ObservableType.PotentialEnergy.value}]",
production_simulation.id,
)
# Finally, extract uncorrelated data
time_series_statistics = ProtocolPath(
"time_series_statistics", conditional_group.id, analysis_protocol.id
)
coordinate_file = ProtocolPath(
"output_coordinate_file", conditional_group.id, production_simulation.id
)
trajectory_path = ProtocolPath(
"trajectory_file_path", conditional_group.id, production_simulation.id
)
observables = ProtocolPath(
"observables", conditional_group.id, production_simulation.id
)
decorrelate_trajectory = analysis.DecorrelateTrajectory(
f"decorrelate_trajectory{id_suffix}"
)
decorrelate_trajectory.time_series_statistics = time_series_statistics
decorrelate_trajectory.input_coordinate_file = coordinate_file
decorrelate_trajectory.input_trajectory_path = trajectory_path
decorrelate_observables = analysis.DecorrelateObservables(
f"decorrelate_observables{id_suffix}"
)
decorrelate_observables.time_series_statistics = time_series_statistics
decorrelate_observables.input_observables = observables
# Build the object which defines which pieces of simulation data to store.
output_to_store = StoredSimulationData()
output_to_store.thermodynamic_state = ProtocolPath("thermodynamic_state", "global")
output_to_store.property_phase = PropertyPhase.Liquid
output_to_store.force_field_id = PlaceholderValue()
output_to_store.number_of_molecules = ProtocolPath(
"output_number_of_molecules", build_coordinates.id
)
output_to_store.substance = ProtocolPath("output_substance", build_coordinates.id)
output_to_store.statistical_inefficiency = ProtocolPath(
"time_series_statistics.statistical_inefficiency",
conditional_group.id,
analysis_protocol.id,
)
output_to_store.observables = ProtocolPath(
"output_observables", decorrelate_observables.id
)
output_to_store.trajectory_file_name = ProtocolPath(
"output_trajectory_path", decorrelate_trajectory.id
)
output_to_store.coordinate_file_name = coordinate_file
output_to_store.source_calculation_id = PlaceholderValue()
# Define where the final values come from.
final_value_source = ProtocolPath(
"value", conditional_group.id, analysis_protocol.id
)
base_protocols = SimulationProtocols(
build_coordinates,
assign_parameters,
energy_minimisation,
equilibration_simulation,
production_simulation,
analysis_protocol,
conditional_group,
decorrelate_trajectory,
decorrelate_observables,
)
return base_protocols, final_value_source, output_to_store
|
[
"openff.evaluator.protocols.openmm.OpenMMEvaluateEnergies",
"openff.evaluator.protocols.analysis.AverageObservable",
"openff.evaluator.protocols.coordinates.BuildCoordinatesPackmol",
"openff.evaluator.protocols.groups.ConditionalGroup",
"openff.evaluator.protocols.miscellaneous.DummyProtocol",
"dataclasses.astuple",
"openff.evaluator.storage.data.StoredSimulationData",
"openff.evaluator.protocols.reweighting.ConcatenateObservables",
"openff.evaluator.protocols.gradients.ZeroGradients",
"openff.evaluator.protocols.forcefield.BaseBuildSystem",
"openff.evaluator.workflow.utils.ProtocolPath",
"openff.evaluator.protocols.groups.ConditionalGroup.Condition",
"openff.evaluator.protocols.analysis.DecorrelateObservables",
"openff.evaluator.protocols.analysis.DecorrelateTrajectory",
"typing.TypeVar",
"openff.evaluator.attributes.PlaceholderValue",
"openff.evaluator.protocols.openmm.OpenMMEnergyMinimisation",
"openff.evaluator.protocols.reweighting.ReweightObservable",
"openff.evaluator.protocols.reweighting.ConcatenateTrajectories",
"openff.evaluator.workflow.utils.ReplicatorValue",
"openff.evaluator.protocols.openmm.OpenMMSimulation",
"openff.evaluator.workflow.schemas.ProtocolReplicator"
] |
[((928, 978), 'typing.TypeVar', 'TypeVar', (['"""S"""'], {'bound': 'analysis.BaseAverageObservable'}), "('S', bound=analysis.BaseAverageObservable)\n", (935, 978), False, 'from typing import Generic, Optional, Tuple, TypeVar\n'), ((983, 1031), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'reweighting.BaseMBARProtocol'}), "('T', bound=reweighting.BaseMBARProtocol)\n", (990, 1031), False, 'from typing import Generic, Optional, Tuple, TypeVar\n'), ((4246, 4293), 'openff.evaluator.workflow.schemas.ProtocolReplicator', 'ProtocolReplicator', ([], {'replicator_id': 'replicator_id'}), '(replicator_id=replicator_id)\n', (4264, 4293), False, 'from openff.evaluator.workflow.schemas import ProtocolReplicator\n'), ((4332, 4374), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""full_system_data"""', '"""global"""'], {}), "('full_system_data', 'global')\n", (4344, 4374), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((4907, 4937), 'openff.evaluator.workflow.utils.ReplicatorValue', 'ReplicatorValue', (['replicator_id'], {}), '(replicator_id)\n', (4922, 4937), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((5012, 5080), 'openff.evaluator.protocols.reweighting.ConcatenateTrajectories', 'reweighting.ConcatenateTrajectories', (['f"""join_trajectories{id_suffix}"""'], {}), "(f'join_trajectories{id_suffix}')\n", (5047, 5080), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((5142, 5201), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""coordinate_file_path"""', 'unpack_stored_data.id'], {}), "('coordinate_file_path', unpack_stored_data.id)\n", (5154, 5201), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((5263, 5322), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""trajectory_file_path"""', 'unpack_stored_data.id'], {}), "('trajectory_file_path', unpack_stored_data.id)\n", (5275, 5322), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((5360, 5426), 'openff.evaluator.protocols.reweighting.ConcatenateObservables', 'reweighting.ConcatenateObservables', (['f"""join_observables{id_suffix}"""'], {}), "(f'join_observables{id_suffix}')\n", (5394, 5426), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((5482, 5532), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""observables"""', 'unpack_stored_data.id'], {}), "('observables', unpack_stored_data.id)\n", (5494, 5532), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((5650, 5712), 'openff.evaluator.protocols.forcefield.BaseBuildSystem', 'forcefield.BaseBuildSystem', (['f"""build_system{replicator_suffix}"""'], {}), "(f'build_system{replicator_suffix}')\n", (5676, 5712), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((5773, 5828), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""force_field_path"""', 'unpack_stored_data.id'], {}), "('force_field_path', unpack_stored_data.id)\n", (5785, 5828), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((5893, 5952), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""coordinate_file_path"""', 'unpack_stored_data.id'], {}), "('coordinate_file_path', unpack_stored_data.id)\n", (5905, 5952), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6006, 6054), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""substance"""', 'unpack_stored_data.id'], {}), "('substance', unpack_stored_data.id)\n", (6018, 6054), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6090, 6160), 'openff.evaluator.protocols.openmm.OpenMMEvaluateEnergies', 'openmm.OpenMMEvaluateEnergies', (['f"""reduced_potential{replicator_suffix}"""'], {}), "(f'reduced_potential{replicator_suffix}')\n", (6119, 6160), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((6230, 6293), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameterized_system"""', 'build_reference_system.id'], {}), "('parameterized_system', build_reference_system.id)\n", (6242, 6293), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6362, 6420), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', 'unpack_stored_data.id'], {}), "('thermodynamic_state', unpack_stored_data.id)\n", (6374, 6420), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6490, 6549), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""coordinate_file_path"""', 'unpack_stored_data.id'], {}), "('coordinate_file_path', unpack_stored_data.id)\n", (6502, 6549), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6619, 6679), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_trajectory_path"""', 'join_trajectories.id'], {}), "('output_trajectory_path', join_trajectories.id)\n", (6631, 6679), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6780, 6841), 'openff.evaluator.protocols.forcefield.BaseBuildSystem', 'forcefield.BaseBuildSystem', (['f"""build_system_target{id_suffix}"""'], {}), "(f'build_system_target{id_suffix}')\n", (6806, 6841), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((6885, 6927), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""force_field_path"""', '"""global"""'], {}), "('force_field_path', 'global')\n", (6897, 6927), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((6964, 6999), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""substance"""', '"""global"""'], {}), "('substance', 'global')\n", (6976, 6999), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7047, 7107), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_coordinate_path"""', 'join_trajectories.id'], {}), "('output_coordinate_path', join_trajectories.id)\n", (7059, 7107), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7154, 7223), 'openff.evaluator.protocols.openmm.OpenMMEvaluateEnergies', 'openmm.OpenMMEvaluateEnergies', (['f"""reduced_potential_target{id_suffix}"""'], {}), "(f'reduced_potential_target{id_suffix}')\n", (7183, 7223), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((7289, 7334), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', '"""global"""'], {}), "('thermodynamic_state', 'global')\n", (7301, 7334), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7401, 7461), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameterized_system"""', 'build_target_system.id'], {}), "('parameterized_system', build_target_system.id)\n", (7413, 7461), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7528, 7588), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_coordinate_path"""', 'join_trajectories.id'], {}), "('output_coordinate_path', join_trajectories.id)\n", (7540, 7588), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7655, 7715), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_trajectory_path"""', 'join_trajectories.id'], {}), "('output_trajectory_path', join_trajectories.id)\n", (7667, 7715), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7781, 7830), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameter_gradient_keys"""', '"""global"""'], {}), "('parameter_gradient_keys', 'global')\n", (7793, 7830), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((7907, 7960), 'openff.evaluator.protocols.gradients.ZeroGradients', 'gradients.ZeroGradients', (['f"""zero_gradients{id_suffix}"""'], {}), "(f'zero_gradients{id_suffix}')\n", (7930, 7960), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((7999, 8041), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""force_field_path"""', '"""global"""'], {}), "('force_field_path', 'global')\n", (8011, 8041), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((8083, 8132), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameter_gradient_keys"""', '"""global"""'], {}), "('parameter_gradient_keys', 'global')\n", (8095, 8132), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((8358, 8433), 'openff.evaluator.protocols.analysis.DecorrelateObservables', 'analysis.DecorrelateObservables', (['f"""decorrelate_target_potential{id_suffix}"""'], {}), "(f'decorrelate_target_potential{id_suffix}')\n", (8389, 8433), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((8506, 8573), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics"""', 'statistical_inefficiency.id'], {}), "('time_series_statistics', statistical_inefficiency.id)\n", (8518, 8573), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((8641, 8704), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables"""', 'reduced_target_potential.id'], {}), "('output_observables', reduced_target_potential.id)\n", (8653, 8704), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((8749, 8818), 'openff.evaluator.protocols.analysis.DecorrelateObservables', 'analysis.DecorrelateObservables', (['f"""decorrelate_observable{id_suffix}"""'], {}), "(f'decorrelate_observable{id_suffix}')\n", (8780, 8818), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((8885, 8952), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics"""', 'statistical_inefficiency.id'], {}), "('time_series_statistics', statistical_inefficiency.id)\n", (8897, 8952), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((9014, 9067), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables"""', 'zero_gradients.id'], {}), "('output_observables', zero_gradients.id)\n", (9026, 9067), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((9827, 9891), 'openff.evaluator.protocols.miscellaneous.DummyProtocol', 'miscellaneous.DummyProtocol', (['f"""replicated_statistics{id_suffix}"""'], {}), "(f'replicated_statistics{id_suffix}')\n", (9854, 9891), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((9945, 10012), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics"""', 'statistical_inefficiency.id'], {}), "('time_series_statistics', statistical_inefficiency.id)\n", (9957, 10012), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10066, 10157), 'openff.evaluator.protocols.analysis.DecorrelateObservables', 'analysis.DecorrelateObservables', (['f"""decorrelate_reference_potential{replicator_suffix}"""'], {}), "(\n f'decorrelate_reference_potential{replicator_suffix}')\n", (10097, 10157), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((10228, 10281), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_value"""', 'replicate_statistics.id'], {}), "('output_value', replicate_statistics.id)\n", (10240, 10281), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10352, 10418), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables"""', 'reduced_reference_potential.id'], {}), "('output_observables', reduced_reference_potential.id)\n", (10364, 10418), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10544, 10636), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables[ReducedPotential]"""', 'decorrelate_reference_potential.id'], {}), "('output_observables[ReducedPotential]',\n decorrelate_reference_potential.id)\n", (10556, 10636), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10699, 10788), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables[ReducedPotential]"""', 'decorrelate_target_potential.id'], {}), "('output_observables[ReducedPotential]',\n decorrelate_target_potential.id)\n", (10711, 10788), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10836, 10897), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables"""', 'decorrelate_observable.id'], {}), "('output_observables', decorrelate_observable.id)\n", (10848, 10897), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((10951, 11044), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics.n_uncorrelated_points"""', 'statistical_inefficiency.id'], {}), "('time_series_statistics.n_uncorrelated_points',\n statistical_inefficiency.id)\n", (10963, 11044), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((12101, 12190), 'openff.evaluator.protocols.analysis.AverageObservable', 'analysis.AverageObservable', (['f"""observable_inefficiency_$({replicator_id}){id_suffix}"""'], {}), "(\n f'observable_inefficiency_$({replicator_id}){id_suffix}')\n", (12127, 12190), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((12281, 12346), 'openff.evaluator.protocols.reweighting.ReweightObservable', 'reweighting.ReweightObservable', (['f"""reweight_observable{id_suffix}"""'], {}), "(f'reweight_observable{id_suffix}')\n", (12311, 12346), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((12570, 12661), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['f"""observables[{observable_type.value}]"""', 'protocols.unpack_stored_data.id'], {}), "(f'observables[{observable_type.value}]', protocols.\n unpack_stored_data.id)\n", (12582, 12661), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16089, 16157), 'openff.evaluator.protocols.coordinates.BuildCoordinatesPackmol', 'coordinates.BuildCoordinatesPackmol', (['f"""build_coordinates{id_suffix}"""'], {}), "(f'build_coordinates{id_suffix}')\n", (16124, 16157), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((16206, 16241), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""substance"""', '"""global"""'], {}), "('substance', 'global')\n", (16218, 16241), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16317, 16376), 'openff.evaluator.protocols.forcefield.BaseBuildSystem', 'forcefield.BaseBuildSystem', (['f"""assign_parameters{id_suffix}"""'], {}), "(f'assign_parameters{id_suffix}')\n", (16343, 16376), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((16418, 16460), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""force_field_path"""', '"""global"""'], {}), "('force_field_path', 'global')\n", (16430, 16460), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16506, 16564), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""coordinate_file_path"""', 'build_coordinates.id'], {}), "('coordinate_file_path', build_coordinates.id)\n", (16518, 16564), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16613, 16667), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_substance"""', 'build_coordinates.id'], {}), "('output_substance', build_coordinates.id)\n", (16625, 16667), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16715, 16781), 'openff.evaluator.protocols.openmm.OpenMMEnergyMinimisation', 'openmm.OpenMMEnergyMinimisation', (['f"""energy_minimisation{id_suffix}"""'], {}), "(f'energy_minimisation{id_suffix}')\n", (16746, 16781), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((16844, 16902), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""coordinate_file_path"""', 'build_coordinates.id'], {}), "('coordinate_file_path', build_coordinates.id)\n", (16856, 16902), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((16964, 17022), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameterized_system"""', 'assign_parameters.id'], {}), "('parameterized_system', assign_parameters.id)\n", (16976, 17022), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((17069, 17132), 'openff.evaluator.protocols.openmm.OpenMMSimulation', 'openmm.OpenMMSimulation', (['f"""equilibration_simulation{id_suffix}"""'], {}), "(f'equilibration_simulation{id_suffix}')\n", (17092, 17132), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((17425, 17470), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', '"""global"""'], {}), "('thermodynamic_state', 'global')\n", (17437, 17470), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((17538, 17600), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_coordinate_file"""', 'energy_minimisation.id'], {}), "('output_coordinate_file', energy_minimisation.id)\n", (17550, 17600), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((17667, 17725), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameterized_system"""', 'assign_parameters.id'], {}), "('parameterized_system', assign_parameters.id)\n", (17679, 17725), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((17786, 17846), 'openff.evaluator.protocols.openmm.OpenMMSimulation', 'openmm.OpenMMSimulation', (['f"""production_simulation{id_suffix}"""'], {}), "(f'production_simulation{id_suffix}')\n", (17809, 17846), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((18111, 18156), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', '"""global"""'], {}), "('thermodynamic_state', 'global')\n", (18123, 18156), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((18221, 18288), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_coordinate_file"""', 'equilibration_simulation.id'], {}), "('output_coordinate_file', equilibration_simulation.id)\n", (18233, 18288), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((18352, 18410), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameterized_system"""', 'assign_parameters.id'], {}), "('parameterized_system', assign_parameters.id)\n", (18364, 18410), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((18473, 18522), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""parameter_gradient_keys"""', '"""global"""'], {}), "('parameter_gradient_keys', 'global')\n", (18485, 18522), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((19914, 19959), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', '"""global"""'], {}), "('thermodynamic_state', 'global')\n", (19926, 19959), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20017, 20115), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['f"""observables[{ObservableType.PotentialEnergy.value}]"""', 'production_simulation.id'], {}), "(f'observables[{ObservableType.PotentialEnergy.value}]',\n production_simulation.id)\n", (20029, 20115), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20206, 20292), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics"""', 'conditional_group.id', 'analysis_protocol.id'], {}), "('time_series_statistics', conditional_group.id,\n analysis_protocol.id)\n", (20218, 20292), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20325, 20415), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_coordinate_file"""', 'conditional_group.id', 'production_simulation.id'], {}), "('output_coordinate_file', conditional_group.id,\n production_simulation.id)\n", (20337, 20415), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20448, 20536), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""trajectory_file_path"""', 'conditional_group.id', 'production_simulation.id'], {}), "('trajectory_file_path', conditional_group.id,\n production_simulation.id)\n", (20460, 20536), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20565, 20640), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""observables"""', 'conditional_group.id', 'production_simulation.id'], {}), "('observables', conditional_group.id, production_simulation.id)\n", (20577, 20640), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((20685, 20753), 'openff.evaluator.protocols.analysis.DecorrelateTrajectory', 'analysis.DecorrelateTrajectory', (['f"""decorrelate_trajectory{id_suffix}"""'], {}), "(f'decorrelate_trajectory{id_suffix}')\n", (20715, 20753), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((21008, 21078), 'openff.evaluator.protocols.analysis.DecorrelateObservables', 'analysis.DecorrelateObservables', (['f"""decorrelate_observables{id_suffix}"""'], {}), "(f'decorrelate_observables{id_suffix}')\n", (21039, 21078), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((21331, 21353), 'openff.evaluator.storage.data.StoredSimulationData', 'StoredSimulationData', ([], {}), '()\n', (21351, 21353), False, 'from openff.evaluator.storage.data import StoredSimulationData\n'), ((21397, 21442), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""thermodynamic_state"""', '"""global"""'], {}), "('thermodynamic_state', 'global')\n", (21409, 21442), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((21539, 21557), 'openff.evaluator.attributes.PlaceholderValue', 'PlaceholderValue', ([], {}), '()\n', (21555, 21557), False, 'from openff.evaluator.attributes import PlaceholderValue\n'), ((21601, 21665), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_number_of_molecules"""', 'build_coordinates.id'], {}), "('output_number_of_molecules', build_coordinates.id)\n", (21613, 21665), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((21712, 21766), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_substance"""', 'build_coordinates.id'], {}), "('output_substance', build_coordinates.id)\n", (21724, 21766), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((21814, 21925), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""time_series_statistics.statistical_inefficiency"""', 'conditional_group.id', 'analysis_protocol.id'], {}), "('time_series_statistics.statistical_inefficiency',\n conditional_group.id, analysis_protocol.id)\n", (21826, 21925), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((21987, 22049), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_observables"""', 'decorrelate_observables.id'], {}), "('output_observables', decorrelate_observables.id)\n", (21999, 22049), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((22107, 22172), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""output_trajectory_path"""', 'decorrelate_trajectory.id'], {}), "('output_trajectory_path', decorrelate_trajectory.id)\n", (22119, 22172), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((22291, 22309), 'openff.evaluator.attributes.PlaceholderValue', 'PlaceholderValue', ([], {}), '()\n', (22307, 22309), False, 'from openff.evaluator.attributes import PlaceholderValue\n'), ((22383, 22448), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""value"""', 'conditional_group.id', 'analysis_protocol.id'], {}), "('value', conditional_group.id, analysis_protocol.id)\n", (22395, 22448), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((12976, 13072), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['f"""output_observables[{observable_type.value}]"""', 'protocols.join_observables.id'], {}), "(f'output_observables[{observable_type.value}]', protocols.\n join_observables.id)\n", (12988, 13072), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((13288, 13390), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['f"""output_observables[{observable_type.value}]"""', 'protocols.decorrelate_observable.id'], {}), "(f'output_observables[{observable_type.value}]', protocols.\n decorrelate_observable.id)\n", (13300, 13390), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((18671, 18727), 'openff.evaluator.protocols.groups.ConditionalGroup', 'groups.ConditionalGroup', (['f"""conditional_group{id_suffix}"""'], {}), "(f'conditional_group{id_suffix}')\n", (18694, 18727), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((1715, 1728), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (1722, 1728), False, 'from dataclasses import astuple, dataclass\n'), ((2734, 2747), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (2741, 2747), False, 'from dataclasses import astuple, dataclass\n'), ((18836, 18871), 'openff.evaluator.protocols.groups.ConditionalGroup.Condition', 'groups.ConditionalGroup.Condition', ([], {}), '()\n', (18869, 18871), False, 'from openff.evaluator.protocols import analysis, coordinates, forcefield, gradients, groups, miscellaneous, openmm, reweighting, storage\n'), ((18913, 18957), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""target_uncertainty"""', '"""global"""'], {}), "('target_uncertainty', 'global')\n", (18925, 18957), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((19075, 19146), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""value.error"""', 'conditional_group.id', 'analysis_protocol.id'], {}), "('value.error', conditional_group.id, analysis_protocol.id)\n", (19087, 19146), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n'), ((19372, 19427), 'openff.evaluator.workflow.utils.ProtocolPath', 'ProtocolPath', (['"""current_iteration"""', 'conditional_group.id'], {}), "('current_iteration', conditional_group.id)\n", (19384, 19427), False, 'from openff.evaluator.workflow.utils import ProtocolPath, ReplicatorValue\n')]
|
from django.contrib.auth import get_user_model
from django.test import TestCase, override_settings
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
TASK_URL = reverse('todo:task-list')
def sample_get_request(client):
return client.get(TASK_URL)
def sample_post_request(client):
payload = {'title': 'Middleware POST test'}
return client.post(TASK_URL, payload)
class MiddlewareResponseTests(TestCase):
"""Tests the custom middleware"""
def setUp(self):
self.user = get_user_model().objects.create(
email="<EMAIL>",
password="<PASSWORD>")
self.client = APIClient()
self.client.force_authenticate(self.user)
@override_settings(MAINTENANCE_MODE=True)
def test_maintenance_mode_ON(self):
"""
Tests the response for all allowed methods
when on maintenance mode enabled
"""
# Test GET method
self.assertEqual(sample_get_request(self.client).status_code,
status.HTTP_503_SERVICE_UNAVAILABLE)
# Test POST method
self.assertEqual(sample_post_request(self.client).status_code,
status.HTTP_503_SERVICE_UNAVAILABLE)
@override_settings(MAINTENANCE_MODE=False)
def test_maintenance_mode_OFF(self):
"""
Test the response for all allowed methods
when maintenance mode disabled
"""
# Test Get method
self.assertEqual(sample_get_request(self.client).status_code,
status.HTTP_200_OK)
# Test POST method
self.assertEqual(sample_post_request(self.client).status_code,
status.HTTP_201_CREATED)
|
[
"django.urls.reverse",
"rest_framework.test.APIClient",
"django.test.override_settings",
"django.contrib.auth.get_user_model"
] |
[((222, 247), 'django.urls.reverse', 'reverse', (['"""todo:task-list"""'], {}), "('todo:task-list')\n", (229, 247), False, 'from django.urls import reverse\n'), ((749, 789), 'django.test.override_settings', 'override_settings', ([], {'MAINTENANCE_MODE': '(True)'}), '(MAINTENANCE_MODE=True)\n', (766, 789), False, 'from django.test import TestCase, override_settings\n'), ((1271, 1312), 'django.test.override_settings', 'override_settings', ([], {'MAINTENANCE_MODE': '(False)'}), '(MAINTENANCE_MODE=False)\n', (1288, 1312), False, 'from django.test import TestCase, override_settings\n'), ((681, 692), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (690, 692), False, 'from rest_framework.test import APIClient\n'), ((562, 578), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (576, 578), False, 'from django.contrib.auth import get_user_model\n')]
|
from django.test import TestCase
from mock import patch
from cryton.lib.util import exceptions, logger
from cryton.lib.models import session
from cryton.cryton_rest_api.models import (
SessionModel,
PlanExecutionModel,
StepModel
)
import os
from model_bakery import baker
TESTS_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@patch('cryton.lib.util.logger.logger', logger.structlog.getLogger('cryton-debug'))
class TestSession(TestCase):
def setUp(self) -> None:
self.plan_exec_obj = baker.make(PlanExecutionModel)
self.named_session_obj = SessionModel.objects.create(plan_execution=self.plan_exec_obj,
session_id='42',
session_name='test-session',
session_type=SessionModel.MSF_SHELL_TYPE
)
self.step_model = baker.make(StepModel)
pass
def test_create_session(self):
# Wrong plan execution ID
with self.assertRaises(exceptions.PlanExecutionDoesNotExist):
session.create_session(0, '0', 'test')
sess_obj = session.create_session(self.plan_exec_obj.id, '0', 'test', SessionModel.MSF_SHELL_TYPE)
self.assertEqual(sess_obj.session_name, 'test')
self.assertEqual(sess_obj.session_type, SessionModel.MSF_SHELL_TYPE)
def test_get_msf_session_id(self):
session_id = session.get_msf_session_id('test-session', self.plan_exec_obj.id)
self.assertEqual(session_id, '42')
def test_get_msf_session_id_ex(self):
with self.assertRaises(exceptions.SessionObjectDoesNotExist):
session.get_msf_session_id('non-existent-session', self.plan_exec_obj.id)
def test_set_msf_session_id(self):
session.set_msf_session_id('test-session', '666', self.plan_exec_obj.id)
self.assertEqual(session.get_msf_session_id('test-session', self.plan_exec_obj.id), '666')
with self.assertRaises(exceptions.SessionObjectDoesNotExist):
session.set_msf_session_id('test-session', '666', 666)
# @patch('cryton.lib.session.get_session_ids')
# def test_get_session_ids(self, mock_get_sess):
# mock_stub = Mock()
# mock_stub.sessions_list().sess_list = '["1", "2"]'
#
# self.step_model.use_any_session_to_target = '1.2.3.4'
# session_list = session.get_session_ids('1.2.3.4', self.plan_exec_obj.id)
#
# self.assertEqual('2', session_list[-1])
|
[
"cryton.lib.models.session.create_session",
"cryton.cryton_rest_api.models.SessionModel.objects.create",
"os.path.realpath",
"model_bakery.baker.make",
"cryton.lib.models.session.get_msf_session_id",
"cryton.lib.util.logger.structlog.getLogger",
"cryton.lib.models.session.set_msf_session_id"
] |
[((402, 444), 'cryton.lib.util.logger.structlog.getLogger', 'logger.structlog.getLogger', (['"""cryton-debug"""'], {}), "('cryton-debug')\n", (428, 444), False, 'from cryton.lib.util import exceptions, logger\n'), ((331, 357), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (347, 357), False, 'import os\n'), ((535, 565), 'model_bakery.baker.make', 'baker.make', (['PlanExecutionModel'], {}), '(PlanExecutionModel)\n', (545, 565), False, 'from model_bakery import baker\n'), ((599, 759), 'cryton.cryton_rest_api.models.SessionModel.objects.create', 'SessionModel.objects.create', ([], {'plan_execution': 'self.plan_exec_obj', 'session_id': '"""42"""', 'session_name': '"""test-session"""', 'session_type': 'SessionModel.MSF_SHELL_TYPE'}), "(plan_execution=self.plan_exec_obj, session_id=\n '42', session_name='test-session', session_type=SessionModel.MSF_SHELL_TYPE\n )\n", (626, 759), False, 'from cryton.cryton_rest_api.models import SessionModel, PlanExecutionModel, StepModel\n'), ((1021, 1042), 'model_bakery.baker.make', 'baker.make', (['StepModel'], {}), '(StepModel)\n', (1031, 1042), False, 'from model_bakery import baker\n'), ((1268, 1360), 'cryton.lib.models.session.create_session', 'session.create_session', (['self.plan_exec_obj.id', '"""0"""', '"""test"""', 'SessionModel.MSF_SHELL_TYPE'], {}), "(self.plan_exec_obj.id, '0', 'test', SessionModel.\n MSF_SHELL_TYPE)\n", (1290, 1360), False, 'from cryton.lib.models import session\n'), ((1552, 1617), 'cryton.lib.models.session.get_msf_session_id', 'session.get_msf_session_id', (['"""test-session"""', 'self.plan_exec_obj.id'], {}), "('test-session', self.plan_exec_obj.id)\n", (1578, 1617), False, 'from cryton.lib.models import session\n'), ((1910, 1982), 'cryton.lib.models.session.set_msf_session_id', 'session.set_msf_session_id', (['"""test-session"""', '"""666"""', 'self.plan_exec_obj.id'], {}), "('test-session', '666', self.plan_exec_obj.id)\n", (1936, 1982), False, 'from cryton.lib.models import session\n'), ((1209, 1247), 'cryton.lib.models.session.create_session', 'session.create_session', (['(0)', '"""0"""', '"""test"""'], {}), "(0, '0', 'test')\n", (1231, 1247), False, 'from cryton.lib.models import session\n'), ((1787, 1860), 'cryton.lib.models.session.get_msf_session_id', 'session.get_msf_session_id', (['"""non-existent-session"""', 'self.plan_exec_obj.id'], {}), "('non-existent-session', self.plan_exec_obj.id)\n", (1813, 1860), False, 'from cryton.lib.models import session\n'), ((2008, 2073), 'cryton.lib.models.session.get_msf_session_id', 'session.get_msf_session_id', (['"""test-session"""', 'self.plan_exec_obj.id'], {}), "('test-session', self.plan_exec_obj.id)\n", (2034, 2073), False, 'from cryton.lib.models import session\n'), ((2165, 2219), 'cryton.lib.models.session.set_msf_session_id', 'session.set_msf_session_id', (['"""test-session"""', '"""666"""', '(666)'], {}), "('test-session', '666', 666)\n", (2191, 2219), False, 'from cryton.lib.models import session\n')]
|
# -*- coding: utf-8 -*-
#################################################################
# File : napari_browser_adv.py
# Version : 0.0.1
# Author : czsrh
# Date : 18.11.2020
# Institution : Carl Zeiss Microscopy GmbH
#
# Copyright (c) 2020 <NAME>, Germany. All Rights Reserved.
#################################################################
from PyQt5.QtWidgets import (
# QPushButton,
# QComboBox,
QHBoxLayout,
QFileDialog,
QDialogButtonBox,
QWidget,
QTableWidget,
QTableWidgetItem,
QCheckBox,
# QDockWidget,
# QSlider,
)
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QFont
import napari
import numpy as np
# from czitools import imgfileutils as imf
import imgfileutils as imf
from aicsimageio import AICSImage
import dask.array as da
import os
from pathlib import Path
def show_image_napari(array, metadata,
blending='additive',
gamma=0.75,
rename_sliders=False):
"""Show the multidimensional array using the Napari viewer
:param array: multidimensional NumPy.Array containing the pixeldata
:type array: NumPy.Array
:param metadata: dictionary with CZI or OME-TIFF metadata
:type metadata: dict
:param blending: NapariViewer option for blending, defaults to 'additive'
:type blending: str, optional
:param gamma: NapariViewer value for Gamma, defaults to 0.85
:type gamma: float, optional
:param verbose: show additional output, defaults to True
:type verbose: bool, optional
:param rename_sliders: name slider with correct labels output, defaults to False
:type verbose: bool, optional
"""
# create scalefcator with all ones
scalefactors = [1.0] * len(array.shape)
dimpos = imf.get_dimpositions(metadata['Axes_aics'])
# get the scalefactors from the metadata
scalef = imf.get_scalefactor(metadata)
# modify the tuple for the scales for napari
scalefactors[dimpos['Z']] = scalef['zx']
# remove C dimension from scalefactor
scalefactors_ch = scalefactors.copy()
del scalefactors_ch[dimpos['C']]
if metadata['SizeC'] > 1:
# add all channels as layers
for ch in range(metadata['SizeC']):
try:
# get the channel name
chname = metadata['Channels'][ch]
except KeyError as e:
print(e)
# or use CH1 etc. as string for the name
chname = 'CH' + str(ch + 1)
# cut out channel
# use dask if array is a dask.array
if isinstance(array, da.Array):
print('Extract Channel using Dask.Array')
channel = array.compute().take(ch, axis=dimpos['C'])
else:
# use normal numpy if not
print('Extract Channel NumPy.Array')
channel = array.take(ch, axis=dimpos['C'])
# actually show the image array
print('Adding Channel : ', chname)
print('Shape Channel : ', ch, channel.shape)
print('Scaling Factors : ', scalefactors_ch)
# get min-max values for initial scaling
clim = imf.calc_scaling(channel,
corr_min=1.0,
offset_min=0,
corr_max=0.85,
offset_max=0)
# add channel to napari viewer
viewer.add_image(channel,
name=chname,
scale=scalefactors_ch,
contrast_limits=clim,
blending=blending,
gamma=gamma)
if metadata['SizeC'] == 1:
# just add one channel as a layer
try:
# get the channel name
chname = metadata['Channels'][0]
except KeyError:
# or use CH1 etc. as string for the name
chname = 'CH' + str(ch + 1)
# actually show the image array
print('Adding Channel: ', chname)
print('Scaling Factors: ', scalefactors)
# use dask if array is a dask.array
if isinstance(array, da.Array):
print('Extract Channel using Dask.Array')
array = array.compute()
# get min-max values for initial scaling
clim = imf.calc_scaling(array)
viewer.add_image(array,
name=chname,
scale=scalefactors,
contrast_limits=clim,
blending=blending,
gamma=gamma)
if rename_sliders:
print('Renaming the Sliders based on the Dimension String ....')
if metadata['SizeC'] == 1:
# get the position of dimension entries after removing C dimension
dimpos_viewer = imf.get_dimpositions(metadata['Axes_aics'])
# get the label of the sliders
sliders = viewer.dims.axis_labels
# update the labels with the correct dimension strings
slidernames = ['B', 'S', 'T', 'Z', 'C']
if metadata['SizeC'] > 1:
new_dimstring = metadata['Axes_aics'].replace('C', '')
# get the position of dimension entries after removing C dimension
dimpos_viewer = imf.get_dimpositions(new_dimstring)
# get the label of the sliders
sliders = viewer.dims.axis_labels
# update the labels with the correct dimension strings
slidernames = ['B', 'S', 'T', 'Z']
for s in slidernames:
if dimpos_viewer[s] >= 0:
sliders[dimpos_viewer[s]] = s
# apply the new labels to the viewer
viewer.dims.axis_labels = sliders
class CheckBoxWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.cbox = QCheckBox("Use Dask Delayed ImageReader", self)
self.layout.addWidget(self.cbox)
self.cbox.setChecked(True)
# adjust font
fnt = QFont()
fnt.setPointSize(12)
fnt.setBold(True)
fnt.setFamily("Arial")
self.cbox.setFont(fnt)
class TableWidget(QWidget):
# def __init__(self, md):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.mdtable = QTableWidget()
self.layout.addWidget(self.mdtable)
self.mdtable.setShowGrid(True)
self.mdtable.setHorizontalHeaderLabels(['Parameter', 'Value'])
header = self.mdtable.horizontalHeader()
header.setDefaultAlignment(Qt.AlignLeft)
def update_metadata(self, md):
row_count = len(md)
col_count = 2
self.mdtable.setColumnCount(col_count)
self.mdtable.setRowCount(row_count)
row = 0
for key, value in md.items():
newkey = QTableWidgetItem(key)
self.mdtable.setItem(row, 0, newkey)
newvalue = QTableWidgetItem(str(value))
self.mdtable.setItem(row, 1, newvalue)
row += 1
# fit columns to content
self.mdtable.resizeColumnsToContents()
def update_style(self):
fnt = QFont()
fnt.setPointSize(11)
fnt.setBold(True)
fnt.setFamily("Arial")
item1 = QtWidgets.QTableWidgetItem('Parameter')
item1.setForeground(QtGui.QColor(25, 25, 25))
item1.setFont(fnt)
self.mdtable.setHorizontalHeaderItem(0, item1)
item2 = QtWidgets.QTableWidgetItem('Value')
item2.setForeground(QtGui.QColor(25, 25, 25))
item2.setFont(fnt)
self.mdtable.setHorizontalHeaderItem(1, item2)
class Open_files(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.layout = QHBoxLayout(self)
self.file_dialog = QFileDialog()
self.file_dialog.setWindowFlags(Qt.Widget)
self.file_dialog.setModal(False)
self.file_dialog.setOption(QFileDialog.DontUseNativeDialog)
# Remove open and cancel button from widget
self.buttonBox = self.file_dialog.findChild(QDialogButtonBox, "buttonBox")
self.buttonBox.clear()
# Only open following file types
self.file_dialog.setNameFilter("Images (*.czi *.ome.tiff *ome.tif *.tiff *.tif)")
self.layout.addWidget(self.file_dialog)
self.file_dialog.currentChanged.connect(self.open_path)
def open_path(self, path):
if os.path.isfile(path):
# remove exitings layers from napari
viewer.layers.select_all()
viewer.layers.remove_selected()
# get the metadata
md, addmd = imf.get_metadata(path)
# add the metadata and adapt the table display
mdbrowser.update_metadata(md)
mdbrowser.update_style()
use_dask = checkbox.cbox.isChecked()
print('Use Dask : ', use_dask)
# get AICSImageIO object
img = AICSImage(path)
if use_dask:
stack = img.dask_data
if not use_dask:
stack = img.get_image_data()
# add the image stack to the napari viewer
show_image_napari(stack, md,
blending='additive',
gamma=0.85,
rename_sliders=True)
# start the main application
with napari.gui_qt():
filebrowser = Open_files()
mdbrowser = TableWidget()
checkbox = CheckBoxWidget()
# create a viewer
viewer = napari.Viewer()
# add widgets
viewer.window.add_dock_widget(filebrowser, name='filebrowser', area='right')
viewer.window.add_dock_widget(checkbox, name='checkbox', area='right')
viewer.window.add_dock_widget(mdbrowser, name='mdbrowser', area='right')
|
[
"imgfileutils.calc_scaling",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtGui.QColor",
"imgfileutils.get_metadata",
"napari.gui_qt",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtGui.QFont",
"aicsimageio.AICSImage",
"os.path.isfile",
"imgfileutils.get_scalefactor",
"imgfileutils.get_dimpositions",
"PyQt5.QtWidgets.QTableWidgetItem",
"napari.Viewer",
"PyQt5.QtWidgets.QFileDialog"
] |
[((1840, 1883), 'imgfileutils.get_dimpositions', 'imf.get_dimpositions', (["metadata['Axes_aics']"], {}), "(metadata['Axes_aics'])\n", (1860, 1883), True, 'import imgfileutils as imf\n'), ((1943, 1972), 'imgfileutils.get_scalefactor', 'imf.get_scalefactor', (['metadata'], {}), '(metadata)\n', (1962, 1972), True, 'import imgfileutils as imf\n'), ((9587, 9602), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (9600, 9602), False, 'import napari\n'), ((9734, 9749), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (9747, 9749), False, 'import napari\n'), ((4476, 4499), 'imgfileutils.calc_scaling', 'imf.calc_scaling', (['array'], {}), '(array)\n', (4492, 4499), True, 'import imgfileutils as imf\n'), ((6016, 6033), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (6027, 6033), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((6054, 6101), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Use Dask Delayed ImageReader"""', 'self'], {}), "('Use Dask Delayed ImageReader', self)\n", (6063, 6101), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((6215, 6222), 'PyQt5.QtGui.QFont', 'QFont', ([], {}), '()\n', (6220, 6222), False, 'from PyQt5.QtGui import QFont\n'), ((6487, 6504), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (6498, 6504), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((6528, 6542), 'PyQt5.QtWidgets.QTableWidget', 'QTableWidget', ([], {}), '()\n', (6540, 6542), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((7370, 7377), 'PyQt5.QtGui.QFont', 'QFont', ([], {}), '()\n', (7375, 7377), False, 'from PyQt5.QtGui import QFont\n'), ((7481, 7520), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""Parameter"""'], {}), "('Parameter')\n", (7507, 7520), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7673, 7708), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""Value"""'], {}), "('Value')\n", (7699, 7708), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7961, 7978), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['self'], {}), '(self)\n', (7972, 7978), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((8006, 8019), 'PyQt5.QtWidgets.QFileDialog', 'QFileDialog', ([], {}), '()\n', (8017, 8019), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((8635, 8655), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (8649, 8655), False, 'import os\n'), ((3274, 3360), 'imgfileutils.calc_scaling', 'imf.calc_scaling', (['channel'], {'corr_min': '(1.0)', 'offset_min': '(0)', 'corr_max': '(0.85)', 'offset_max': '(0)'}), '(channel, corr_min=1.0, offset_min=0, corr_max=0.85,\n offset_max=0)\n', (3290, 3360), True, 'import imgfileutils as imf\n'), ((4987, 5030), 'imgfileutils.get_dimpositions', 'imf.get_dimpositions', (["metadata['Axes_aics']"], {}), "(metadata['Axes_aics'])\n", (5007, 5030), True, 'import imgfileutils as imf\n'), ((5452, 5487), 'imgfileutils.get_dimpositions', 'imf.get_dimpositions', (['new_dimstring'], {}), '(new_dimstring)\n', (5472, 5487), True, 'import imgfileutils as imf\n'), ((7050, 7071), 'PyQt5.QtWidgets.QTableWidgetItem', 'QTableWidgetItem', (['key'], {}), '(key)\n', (7066, 7071), False, 'from PyQt5.QtWidgets import QHBoxLayout, QFileDialog, QDialogButtonBox, QWidget, QTableWidget, QTableWidgetItem, QCheckBox\n'), ((7549, 7573), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(25)', '(25)', '(25)'], {}), '(25, 25, 25)\n', (7561, 7573), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7737, 7761), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(25)', '(25)', '(25)'], {}), '(25, 25, 25)\n', (7749, 7761), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8846, 8868), 'imgfileutils.get_metadata', 'imf.get_metadata', (['path'], {}), '(path)\n', (8862, 8868), True, 'import imgfileutils as imf\n'), ((9157, 9172), 'aicsimageio.AICSImage', 'AICSImage', (['path'], {}), '(path)\n', (9166, 9172), False, 'from aicsimageio import AICSImage\n')]
|
#!/usr/bin/env python
# coding:utf8
"""
@Time : 2018/10/31
@Author : fls
@Contact : <EMAIL>
@Desc : fls易用性utils-日期相关utils
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2018/10/31 11:41 fls 1.0 create
2020/08/01 11:43 fls 1.1 新增函数get_current_week
"""
import datetime
FMT_DATETIME = '%Y%m%d%H%M%S'
FMT_DATETIME_SEPARATE = '%Y-%m-%d %H:%M:%S'
FMT_DATE = '%Y%m%d'
FMT_TIME = '%H%M%S'
def fmt_date(date=None, fmt=FMT_DATETIME_SEPARATE):
"""格式化日期(date = datetime.datetime.now(), fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
n = date.strftime(fmt)
return n
def str2date(date=None, fmt=FMT_DATETIME_SEPARATE):
"""
字符串转日期时间格式
:param date:
:param fmt:
:return:
"""
if not date:
return fmt_date(date=None, fmt=fmt)
return datetime.datetime.strptime(date, fmt)
def get_day_n(date=None, day=1, fmt=FMT_DATETIME_SEPARATE):
"""获取n天后或-n天前的日期(date = datetime.datetime.now(), day = 1, fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: day n天后的日期,默认1天后,为负数则取n天前的日期
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
return fmt_date(date=date + datetime.timedelta(days=day), fmt=fmt)
def get_seconds_n(date=None, seconds=0, fmt=FMT_DATETIME_SEPARATE):
"""获取n秒后或-n秒前的日期(date = datetime.datetime.now(), seconds = 1, fmt = '%Y-%m-%d %H:%M:%S')
\t\t@param: date 日期,为空则取当前日期
\t\t@param: seconds n秒后的时间,默认0秒后,为负数则取n秒前的时间
\t\t@param: fmt 格式化样式
"""
if not date:
date = datetime.datetime.now()
return fmt_date(date=date + datetime.timedelta(seconds=seconds), fmt=fmt)
def get_interval_day(start, end, fmt=FMT_DATE):
"""获取日期间的天数(start, end, fmt = '%Y%m%d')
\t\t@param: start 开始日期
\t\t@param: end 结束日期
\t\t@param: fmt 格式化样式
"""
def gen_dates(b_date, days):
day = datetime.timedelta(days=1)
for i in range(days):
yield b_date + day * i
if start is None:
return []
start = datetime.datetime.strptime(start, fmt)
if end is None:
end = datetime.datetime.now()
else:
end = datetime.datetime.strptime(end, fmt)
data = []
for d in gen_dates(start, (end - start).days + 1):
data.append(d.strftime(fmt))
return data
def reformat_date_str(rq1, fmt1, fmt2):
"""按目标格式,重新格式化日期(rq1, fmt1, fmt2)
\t\t@param: rq1 开始日期
\t\t@param: fmt1 rq1的格式
\t\t@param: fmt2 目标格式
"""
return datetime.datetime.strptime(rq1, fmt1).strftime(fmt2)
def get_current_week(date=None, fmt=FMT_DATE):
"""
返回日期所在周的日期字符串列表
:param date:
:param fmt:
:return:
"""
if not date:
date = datetime.datetime.now()
monday = date
one_day = datetime.timedelta(days=1)
while monday.weekday() != 0:
monday -= one_day
# 返回所在周的字符串列表
ret = []
for i in range(7):
ret.append((monday + datetime.timedelta(days=i)).strftime(fmt))
return ret
def help(num='①'):
print(num + "关于日期时间")
print("\tfmt_date(date = datetime.datetime.now(), fmt = '%Y-%m-%d %H:%M:%S')")
print("\t" + fmt_date.__doc__)
print("\tafter_date(date = datetime.datetime.now(), day = 1, fmt = '%Y-%m-%d %H:%M:%S)")
print("\t" + get_day_n.__doc__)
print("\tafterSeconds(date = datetime.datetime.now(), seconds = 0, fmt = '%Y-%m-%d %H:%M:%S)")
print("\t" + get_seconds_n.__doc__)
print("\tinterval_day(start, end, fmt = '%Y%m%d')")
print("\t" + get_interval_day.__doc__)
print("\treformat_date_str(rq1, fmt1, fmt2)")
print("\t" + reformat_date_str.__doc__)
|
[
"datetime.datetime.strptime",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((996, 1033), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', 'fmt'], {}), '(date, fmt)\n', (1022, 1033), False, 'import datetime\n'), ((2210, 2248), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start', 'fmt'], {}), '(start, fmt)\n', (2236, 2248), False, 'import datetime\n'), ((2940, 2966), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2958, 2966), False, 'import datetime\n'), ((729, 752), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (750, 752), False, 'import datetime\n'), ((1329, 1352), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1350, 1352), False, 'import datetime\n'), ((1735, 1758), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1756, 1758), False, 'import datetime\n'), ((2065, 2091), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2083, 2091), False, 'import datetime\n'), ((2283, 2306), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2304, 2306), False, 'import datetime\n'), ((2331, 2367), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end', 'fmt'], {}), '(end, fmt)\n', (2357, 2367), False, 'import datetime\n'), ((2884, 2907), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2905, 2907), False, 'import datetime\n'), ((2668, 2705), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['rq1', 'fmt1'], {}), '(rq1, fmt1)\n', (2694, 2705), False, 'import datetime\n'), ((1385, 1413), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day'}), '(days=day)\n', (1403, 1413), False, 'import datetime\n'), ((1791, 1826), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (1809, 1826), False, 'import datetime\n'), ((3110, 3136), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (3128, 3136), False, 'import datetime\n')]
|
# Generated by Django 3.2.4 on 2021-09-30 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_reservation_iscanceled'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='Amount',
field=models.DecimalField(decimal_places=2, default=10, max_digits=7),
preserve_default=False,
),
migrations.AddField(
model_name='reservation',
name='PricePerHour',
field=models.DecimalField(decimal_places=2, default=3, max_digits=5),
preserve_default=False,
),
]
|
[
"django.db.models.DecimalField"
] |
[((339, 402), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(10)', 'max_digits': '(7)'}), '(decimal_places=2, default=10, max_digits=7)\n', (358, 402), False, 'from django.db import migrations, models\n'), ((569, 631), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(3)', 'max_digits': '(5)'}), '(decimal_places=2, default=3, max_digits=5)\n', (588, 631), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: wikirecs
# language: python
# name: wikirecs
# ---
# # WikiRecs
# A project to recommend the next Wikipedia article you might like to edit
# + init_cell=true
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import logging
import wikipedia
import requests
import os
import wikirecs as wr
import implicit
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from tqdm.auto import tqdm
import umap
import pickle
import collections
import recommenders
import plotly.express as px
from pyarrow import feather
import itertools
from itables import show
import matplotlib
from implicit.nearest_neighbours import (
bm25_weight)
# -
from itables.javascript import load_datatables
load_datatables()
# + init_cell=true
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 100)
# + init_cell=true
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# -
# # Assemble the complete histories
import os
all_histories = []
for fname in os.listdir('edit_histories_2021-05-28'):
if 'feather' in fname:
all_histories.append(feather.read_feather('edit_histories_2021-05-28/{}'.format(fname)))
all_histories = pd.concat(all_histories, ignore_index=True)
feather.write_feather(all_histories, "all_histories_2021-05-28.feather")
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
all_histories.columns
len(all_histories.pageid.unique())
# # Load all_histories (raw data), transform and split
# +
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
print("Length raw edit history data: {}".format(len(all_histories)))
# +
from pull_edit_histories import get_edit_history
## Add one particular user
cols = ['userid', 'user', 'pageid', 'title',
'timestamp', 'sizediff']
with open("../username.txt", "r") as file:
for username in file:
oneuser = get_edit_history(user=username.strip(),
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
all_histories = pd.concat([all_histories, oneuser], ignore_index=True)
print("Length after adding users: {}".format(len(all_histories)))
# -
# ## EDA on raw histories
# Look at the distribution of edit counts
edit_counts = all_histories.groupby('userid').userid.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# Most edits by user
all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False)
# Find the elbow in number of edits
plt.plot(all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# +
# What are the most popular pages (edited by the most users)
page_popularity = all_histories.drop_duplicates(subset=['title','user']).groupby('title').count().user.sort_values()
pd.set_option('display.max_rows', 1000)
page_popularity.iloc[-1000:].iloc[::-1]
# -
# ## Clean data
# ### Remove consecutive edits and summarize runs
# +
# %%time
def remove_consecutive_edits(df):
c = dict(zip(df.columns, range(len(df.columns))))
keyfunc = lambda x: (x[c['userid']],x[c['pageid']])
first_and_last = lambda run: [run[0][c['userid']],
run[0][c['user']],
run[0][c['pageid']],
run[0][c['title']],
run[-1][c['timestamp']],
run[0][c['timestamp']],
sum([abs(r[c['sizediff']]) for r in run]),
len(run)]
d = df.values.tolist()
return pd.DataFrame([first_and_last(list(g)) for k,g in itertools.groupby(d, key=keyfunc)],
columns=['userid', 'user', 'pageid', 'title', 'first_timestamp', 'last_timestamp','sum_sizediff','consecutive_edits'])
clean_histories = remove_consecutive_edits(all_histories)
# -
# ### Remove top N most popular pages
# +
# Get the top most popular pages
TOPN = 20
popularpages = all_histories.drop_duplicates(subset=['title','pageid','userid']).groupby(['title','pageid']).count().user.sort_values()[-TOPN:]
before_count = len(all_histories)
# -
popularpages
# Remove those popular pages
popular_pageids = popularpages.index.get_level_values(level='pageid').values
is_popular_page_edit = clean_histories.pageid.isin(popular_pageids)
clean_histories = clean_histories.loc[~is_popular_page_edit].copy()
all_histories = None
after_count = len(clean_histories)
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
print("Number of unique page ids: {}".format(len(clean_histories.pageid.unique())))
# ### Remove users with too many or too few edits
MIN_EDITS = 5
MAX_EDITS = 10000
# Get user edit counts
all_user_edit_counts = clean_histories.groupby(['userid','user']).userid.count()
# +
# Remove users with too few edits
keep_user = all_user_edit_counts.values >= MIN_EDITS
# Remove users with too many edits
keep_user = keep_user & (all_user_edit_counts.values <= MAX_EDITS)
# Remove users with "bot" in the name
is_bot = ['bot' in username.lower() for username in all_user_edit_counts.index.get_level_values(1).values]
keep_user = keep_user & ~np.array(is_bot)
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# +
# Remove those users
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_histories = clean_histories.loc[clean_histories.userid.isin(userids_to_keep)]
clean_histories = clean_histories.reset_index(drop=True)
# -
print("Length after removing users: {}".format(len(clean_histories)))
# %%time
# Save cleaned histories
feather.write_feather(clean_histories, '../clean_histories_2021-05-28.feather')
# ## Build lookup tables
# %%time
clean_histories = feather.read_feather('../clean_histories_2021-05-28.feather')
# +
# Page id to title and back
lookup = clean_histories.drop_duplicates(subset=['pageid']).loc[:,['pageid','title']]
p2t = dict(zip(lookup.pageid, lookup.title))
t2p = dict(zip(lookup.title, lookup.pageid))
# User id to name and back
lookup = clean_histories.drop_duplicates(subset=['userid']).loc[:,['userid','user']]
u2n = dict(zip(lookup.userid, lookup.user))
n2u = dict(zip(lookup.user, lookup.userid))
# +
# Page id and userid to index in cooccurence matrix and back
pageids = np.sort(clean_histories.pageid.unique())
userids = np.sort(clean_histories.userid.unique())
p2i = {pageid:i for i, pageid in enumerate(pageids)}
u2i = {userid:i for i, userid in enumerate(userids)}
i2p = {v: k for k, v in p2i.items()}
i2u = {v: k for k, v in u2i.items()}
# +
# User name and page title to index and back
n2i = {k:u2i[v] for k, v in n2u.items() if v in u2i}
t2i = {k:p2i[v] for k, v in t2p.items() if v in p2i}
i2n = {v: k for k, v in n2i.items()}
i2t = {v: k for k, v in t2i.items()}
# -
wr.save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), '../lookup_tables_2021-05-28.pickle')
wr.save_pickle((userids, pageids), '../users_and_pages_2021-05-28.pickle')
#
# ## Build test and training set
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
# Make a test set from the most recent edit by each user
histories_test = clean_histories.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the training set
histories_train = wr.dataframe_set_subtract(clean_histories, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = wr.dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
print("Number of pages in training set: {}".format(len(histories_train.pageid.unique())))
print("Number of users in training set: {}".format(len(histories_train.userid.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['title','user']).groupby('title').count().user > 1)))
feather.write_feather(histories_train, '../histories_train_2021-05-28.feather')
feather.write_feather(histories_dev, '../histories_dev_2021-05-28.feather')
feather.write_feather(histories_test, '../histories_test_2021-05-28.feather')
# +
resurface_userids, discovery_userids = wr.get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
# -
wr.save_pickle((resurface_userids, discovery_userids), '../resurface_discovery_users_2021-05-28.pickle')
# # FIG Rama and other examples
print("Number of edits by Rama in a year: {}".format(len(all_histories.loc[all_histories.user == 'Rama'])))
print("Number of pages edited: {}".format(len(all_histories.loc[all_histories.user == 'Rama'].drop_duplicates(subset=['pageid']))))
# +
from pull_edit_histories import get_edit_history
oneuser = get_edit_history(user="Thornstrom",
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
# -
wr.print_user_history(all_histories, user="Rama")
wr.print_user_history(all_histories, user="Meow")
# # Build matrix for implicit collaborative filtering
# +
# %%time
# Get the user/page edit counts
for_implicit = histories_train.groupby(["userid","pageid"]).count().first_timestamp.reset_index().rename(columns={'first_timestamp':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
# +
row = np.array([p2i[p] for p in for_implicit.pageid.values])
col = np.array([u2i[u] for u in for_implicit.userid.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
# -
# %%time
wr.save_pickle(implicit_matrix,'../implicit_matrix_2021-05-28.pickle')
# ### Test the matrix and indices
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
# +
# Crude item to item recs by looking for items edited by the same editors (count how many editors overlap)
veditors = np.flatnonzero(implicit_matrix[t2i['Hamburger'],:].toarray())
indices = np.flatnonzero(np.sum(implicit_matrix[:,veditors] > 0,axis=1))
totals = np.asarray(np.sum(implicit_matrix[:,veditors] > 0 ,axis=1)[indices])
sorted_order = np.argsort(totals.squeeze())
[i2t.get(i, "") + " " + str(total[0]) for i,total in zip(indices[sorted_order],totals[sorted_order])][::-1]
# -
# Histories of editors who had that item
for ved in veditors:
print("\n\n\n" + i2n[ved])
wr.print_user_history(all_histories, user=i2n[ved])
# # Implicit recommendation
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
bm25_matrix = bm25_weight(implicit_matrix, K1=100, B=0.25)
num_factors =200
regularization = 0.01
os.environ["OPENBLAS_NUM_THREADS"] = "1"
model = implicit.als.AlternatingLeastSquares(
factors=num_factors, regularization=regularization
)
model.fit(bm25_matrix)
wr.save_pickle(model,'../als%d_bm25_model.pickle' % num_factors)
model = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], bm25_matrix.tocsc(), N=1000, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score) + ' %d' % (implicit_matrix[ind,:]>0).sum()
for ind, score in recommendations]
# ## Grid search results
grid_search_results = wr.load_pickle("../implicit_grid_search.pickle")
pd.DataFrame(grid_search_results)
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results],
columns = ['num_factors','regularization'] + list(grid_search_results[0]['metrics'].keys()))
grid_search_results_bm25 = wr.load_pickle("../implicit_grid_search_bm25.pickle")
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results_bm25],
columns = ['num_factors','regularization'] + list(grid_search_results_bm25[0]['metrics'].keys()))
# # B25 Recommendation
from implicit.nearest_neighbours import BM25Recommender
# +
bm25_matrix = bm25_weight(implicit_matrix, K1=20, B=1)
bm25_matrix = bm25_matrix.tocsc()
sns.distplot(implicit_matrix[implicit_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
sns.distplot(bm25_matrix[bm25_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
# -
K1 = 100
B = 0.25
model = BM25Recommender(K1, B)
model.fit(implicit_matrix)
wr.save_pickle(model, '../bm25_model_2021-05-28.pkl')
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
a = ['Steven Universe 429.4746',
'List of Steven Universe episodes 178.4544',
'Demon Bear 128.7237',
'Legion of Super Heroes (TV series) 128.7237',
'The Amazing World of Gumball 126.3522',
'Steven Universe Future 123.9198']
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['Hamburger'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], implicit_matrix.astype(np.float32), N=1000, filter_already_liked_items=True)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
plt.plot([ score for i,(ind, score) in enumerate(recommendations) if implicit_matrix[ind,u2i[u]]==0])
wr.save_pickle(model, "b25_model.pickle")
model = wr.load_pickle("b25_model.pickle")
# # Evaluate models
# ## Item to item recommendation
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
# ## User to item recommendations
# +
# Check out a specific example
u = n2u["HyprMarc"]
wr.print_user_history(clean_histories, userid=u)
# -
u = n2u["HyprMarc"]
recommendations = model.recommend(u2i[u], implicit_matrix, N=100, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
# # Visualize implicit embeddings
model = wr.load_pickle('../als150_model.pickle')
# +
# Only plot the ones with over 3 entries
indices = np.squeeze(np.asarray(np.sum(implicit_matrix[nonzero,:],axis=1))) > 3
indices = nonzero[indices]
# -
len(indices)
# Visualize the collaborative filtering item vectors, embedding into 2D space with UMAP
# nonzero = np.flatnonzero(implicit_matrix.sum(axis=1))
# indices = nonzero[::100]
embedding = umap.UMAP().fit_transform(model.item_factors[indices,:])
plt.figure(figsize=(10,10))
plt.plot(embedding[:,0], embedding[:,1],'.')
# _ = plt.axis('square')
# ## Visualize actors in the embeddings space
# +
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1))))
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
# -
actors = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> (actor)',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
actor_indices = [t2i[a] for a in actors]
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1))))
embedding = umap.UMAP().fit_transform(model.item_factors[actor_indices,:])
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in actor_indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
key = np.zeros(len(actors))
key[:8] = 1
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color=key,
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# +
# Full embedding plotly interactive visualization
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color='log_edit_count',
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# -
# # Evaluate on test set
# +
# Load the edit histories in the training set and the test set
histories_train = feather.read_feather('../histories_train_2021-05-28.feather')
histories_test = feather.read_feather('../histories_test_2021-05-28.feather')
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
resurface_userids, discovery_userids = wr.load_pickle('../resurface_discovery_users_2021-05-28.pickle')
results = {}
# -
wr.display_recs_with_history(
recs,
userids[:100],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# ## Most popular
# +
# %%time
K=20
rec_name = "Popularity"
prec = recommenders.PopularityRecommender(histories_train)
precs = prec.recommend_all(userids, K)
wr.save_pickle(precs, "../" + rec_name +"_recs.pickle")
# +
results[rec_name] = wr.get_recs_metrics(
histories_dev, precs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# -
# ## Most recent
# %%time
# Most recent
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
rrecs = rrec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Recent"
wr.save_pickle(rrecs, "../" + rec_name +"_recs.pickle")
len(resurface_userids)
results ={}
results[rec_name] = wr.get_recs_metrics(
histories_dev, rrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## Most frequent
# %%time
# Sorted by frequency of edits
K=20
frec = recommenders.MostFrequentRecommender(histories_train)
frecs = frec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Frequent"
wr.save_pickle(frecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, frecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## BM25
# %%time
K=20
brec = recommenders.MyBM25Recommender(model, implicit_matrix)
brecs = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=False)
rec_name = "bm25"
wr.save_pickle(brecs, "../" + rec_name +"_recs.pickle")
# filter_already_liked_items = False
results[rec_name] = wr.get_recs_metrics(
histories_dev, brecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# filter_already_liked_items = True
rec_name = "bm25_filtered"
brecs_filtered = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=True)
wr.save_pickle(brecs_filtered, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, recs['bm25_filtered'], K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
results[rec_name] = wr.get_recs_metrics(
histories_dev, recs['bm25_filtered'], K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## ALS Implicit collaborative filtering
model_als = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
# %%time
rec_name = "als"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=False)
wr.save_pickle(irecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
rec_name = "als_filtered"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs_filtered = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=True)
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs_filtered, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
wr.save_pickle(irecs_filtered, "../" + rec_name +"_recs.pickle")
show(pd.DataFrame(results).T)
# ## Jaccard
# %%time
# Sorted by Jaccard
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
recent_pages_dict = rrec.all_recent_only(K, userids, interactions=histories_train)
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids,
K,
num_lookpage_pages=1,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
wr.save_pickle(jrecs,"jaccard-1_recs.pickle")
rec_name = "Jaccard"
results[rec_name] = wr.get_recs_metrics(
histories_dev, jrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
wr.display_recs_with_history(
jrecs,
userids[:30],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# %%time
# Sorted by Jaccard
K=5
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids[:1000],
10,
num_lookpage_pages=50,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
print("Jaccard")
print("Recall @ %d: %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K)))
print("Prop resurfaced: %.1f%%" % (100*wr.prop_resurface(jrecs, K, implicit_matrix, i2p, u2i)))
print("Recall @ %d (discovery): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=discovery_userids)))
print("Recall @ %d (resurface): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=resurface_userids)))
# ## Interleaved
recs.keys()
# +
# Interleaved jaccard and recent
K=20
rec_name = "Interleaved"
print(rec_name)
intrec = recommenders.InterleaveRecommender()
intrecs = intrec.recommend_all(K, [recs['Recent'], recs['bm25_filtered']])
wr.save_pickle(intrecs, "../" + rec_name +"_recs.pickle")
# -
results[rec_name] = wr.get_recs_metrics(
histories_dev, intrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# # Report on evaluations results
# ## Hard coded metrics
# +
results = {}
results["Popularity"] = {'recall': 0.16187274312040842,
'ndcg': 0.0005356797596941751,
'resurfaced': 0.6213422985929523,
'recall_discover': 0.11947959996459864,
'recall_resurface': 0.2624396388830569,
'ndcg_discover': 0.000410354483750028,
'ndcg_resurface': 0.0008329819416998272}
results["Recent"] = {'recall': 22.618602913709378,
'ndcg': 0.14306080818547054,
'resurfaced': 71.13808990163118,
'recall_discover': 0.03982653332153288,
'recall_resurface': 76.18097837497375,
'ndcg_discover': 0.00011494775493754298,
'ndcg_resurface': 0.4821633227780786}
results["Frequent"] = {'recall': 20.834889802017184,
'ndcg': 0.11356953338215306,
'resurfaced': 76.10353629684971,
'recall_discover': 0.035401362952473675,
'recall_resurface': 70.17635943732941,
'ndcg_discover': 9.90570471847343e-05,
'ndcg_resurface': 0.38274923359395385}
results["ALS"] = {'recall': 5.488108579255385,
'ndcg': 0.026193145556306998,
'resurfaced': 16.251556468683848,
'recall_discover': 1.146119125586335,
'recall_resurface': 15.788368675204703,
'ndcg_discover': 0.004817135435898367,
'ndcg_resurface': 0.0769022655123215}
results["ALS_filtered"] = {'recall': 0.9027518366330469,
'ndcg': 0.003856703716094881,
'resurfaced': 0.0,
'recall_discover': 1.2832994070271706,
'recall_resurface': 0.0,
'ndcg_discover': 0.005482465270193466,
'ndcg_resurface': 0.0}
results["BM25"] = {'recall': 18.945336819823186,
'ndcg': 0.1015175508656068,
'resurfaced': 74.0469742248786,
'recall_discover': 1.3939286662536507,
'recall_resurface': 60.581566239764854,
'ndcg_discover': 0.004204510293040833,
'ndcg_resurface': 0.332367864833573}
results["BM25_filtered"] = {'recall': 1.8148424853691942,
'ndcg': 0.008622285155255174,
'resurfaced': 0.14848711243929774,
'recall_discover': 2.522347110363749,
'recall_resurface': 0.1364686122191896,
'ndcg_discover': 0.011740495141426633,
'ndcg_resurface': 0.0012251290280766518}
results["Interleaved"] = {'recall': 21.382766778732414,
'ndcg': 0.12924273396038563,
'resurfaced': 42.478676379031256,
'recall_discover': 1.8364457031595716,
'recall_resurface': 67.75141717404996,
'ndcg_discover': 0.006943981897312752,
'ndcg_resurface': 0.4193652616867473}
results_df = pd.DataFrame(results).T
results_df.reset_index(inplace=True)
# -
# ## Table of results
results_df
# ### FIG Table for post
# +
def scatter_text(x, y, text_column, data, title, xlabel, ylabel):
"""Scatter plot with country codes on the x y coordinates
Based on this answer: https://stackoverflow.com/a/54789170/2641825"""
# Create the scatter plot
p1 = sns.scatterplot(x, y, data=data, size = 8, legend=False)
# Add text besides each point
for line in range(0,data.shape[0]):
p1.text(data[x][line]+0.01, data[y][line],
data[text_column][line], horizontalalignment='left',
size='medium', color='black', weight='semibold')
# Set title and axis labels
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return p1
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
results_df.sort_values("recall", ascending=False).style.apply(highlight_max, subset=["recall",
"ndcg",
"resurfaced",
"recall_discover",
"recall_resurface",
"ndcg_discover",
"ndcg_resurface",]).format({"recall": "{:.1f}%",
"ndcg": "{:.3f}",
"resurfaced": "{:.1f}%",
"recall_discover": "{:.1f}%",
"recall_resurface": "{:.1f}%",
"ndcg_discover": "{:.3f}",
"ndcg_resurface": "{:.3f}",
})
# -
colnames = ["Recommender", "Recall@20", "nDCG@20","Resurfaced","Recall@20 discovery","Recall@20 resurface","nDCG@20 discovery","nDCG@20 resurface"]
#apply(highlight_max, subset=colnames[1:]).
results_df.columns = colnames
results_df.sort_values("Recall@20", ascending=False).style.\
format({"Recall@20": "{:.1f}%",
"nDCG@20": "{:.3f}",
"Resurfaced": "{:.1f}%",
"Recall@20 discovery": "{:.1f}%",
"Recall@20 resurface": "{:.1f}%",
"nDCG@20 discovery": "{:.3f}",
"nDCG@20 resurface": "{:.3f}",
})
# ## Scatter plots (resurface vs discover)
fig = px.scatter(data_frame=results_df,
x='ndcg_discover',
y='ndcg_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
fig = px.scatter(data_frame=results_df,
x='recall_discover',
y='recall_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
# ### FIG Scatterplot for post
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
y = [0, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
# +
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 48, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(12,7))
A = results_df.loc[:,'Recall@20 discovery']
B = results_df.loc[:,'Recall@20 resurface']
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
y = [-1, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
plt.plot(x,y,":k")
x[0] = 0
y[0] = y[1]
# plt.rcParams.update({'font.size': 48})
plt.rc('xtick', labelsize=3)
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
plt.plot(x,y,":k")
plt.plot(A,B,'.', MarkerSize=15)
for xyz in zip(results_df.Recommender, A, B): # <--
plt.gca().annotate('%s' % xyz[0], xy=np.array(xyz[1:])+(0.05,0), textcoords='data', fontsize=18) # <--
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
plt.xlabel("Recall@20 discovery (%)",fontsize=20)
plt.ylabel("Recall@20 resurface (%)",fontsize=20)
plt.xlim([0,3])
plt.ylim([-2,85])
axes = plt.gca()
# -
# ## Read recs in from files
recommender_names = ['Popularity', 'Recent', 'Frequent', 'ALS', 'ALS_filtered', 'BM25', 'BM25_filtered', 'Interleaved']
recs = {rname:wr.load_pickle("../" + rname + "_recs.pickle") for rname in recommender_names}
# ## Recall curves
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20)
# print(recall_curve[-1])
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, resurface_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
# ### FIG Implicit vs BM25 figure
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 18, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(10,6))
for rname in ["ALS","BM25"]:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(np.array(recall_curve)*100,'.-',markersize=12)
plt.legend( ["ALS","BM25"],title="Algorithm", fontsize=16, title_fontsize=16, facecolor="w")
plt.xlabel("@N",fontsize=20)
plt.ylabel("Discovery recall (%)",fontsize=20)
_ = plt.xticks(np.arange(0,20,2),np.arange(0,20,2)+1)
# plt.gca().legend(prop=dict(size=20))
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
# # User recommendation comparison
recs_subset = ["Recent","Frequent","Popularity","Implicit","bm25","interleaved"]
print("Next edit: " + histories_dev.loc[histories_dev.userid == userid].title.values[0])
# ## FIG Rama table
# +
def bold_viewed(val, viewed_pages):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
weight = 'bold' if val in viewed_pages else 'normal'
return 'font-weight: %s' % weight
def color_target(val, target_page):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
color = 'red' if val == target_page else 'black'
return 'color: %s' % color
def display_user_recs_comparison(user_name, recs, recs_subset, train_set, test_set, N=20):
userid = n2u[user_name]
recs_table = pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in recs_subset})
recs_table = recs_table.reset_index()
recs_table.loc[:,"index"] = recs_table.loc[:,"index"]+1
recs_table = recs_table.rename(columns={"index":""})
viewed_pages = train_set.loc[train_set.userid == userid,["title"]].drop_duplicates(subset=["title"]).values.squeeze()
target_page = test_set.loc[test_set.userid == userid].title.values[0]
# print("Next edit: " + target_page)
s = recs_table.style.applymap(bold_viewed, viewed_pages=viewed_pages).applymap(color_target, target_page=target_page)
display(s)
# +
recs_subset = ["Recent","Frequent","Popularity","ALS","ALS_filtered","BM25","BM25_filtered"]
display_user_recs_comparison('Rama', recs, recs_subset, histories_train, histories_dev, N=10)
# -
# ## Other individuals tables
display_user_recs_comparison('Meow', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Tulietto', recs, recs_subset, histories_train, histories_dev, N=10)
display_user_recs_comparison('Thornstrom', recs, recs_subset, histories_train, histories_dev, N=10)
# ## FIG Interleaved
display_user_recs_comparison('Rama', recs,['Interleaved'], histories_train, histories_dev, N=10)
display_user_recs_comparison('KingArti', recs,['Interleaved'], histories_train, histories_dev, N=10)
N = 20
display(pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][:N] for rec_name in recs_subset}))
persons_of_interest = [
"DoctorWho42",
"AxelSjögren",
"<NAME>",
"Tulietto",
"LipaCityPH",
"<NAME>",
"Thornstrom",
"Meow",
"HyprMarc",
"Jampilot",
"Rama"
]
N=10
irec_500 = recommenders.ImplicitCollaborativeRecommender(model, implicit_matrix)
irecs_poi = irec_500.recommend_all([n2u[user_name] for user_name in persons_of_interest], N, u2i=u2i, n2i=n2i, i2p=i2p)
# # Find interesting users
# +
edited_pages = clean_histories.drop_duplicates(subset=['title','user']).groupby('user').userid.count()
edited_pages = edited_pages[edited_pages > 50]
edited_pages = edited_pages[edited_pages < 300]
# -
clean_histories.columns
display_user_recs_comparison("Rama", recs, recs_subset, histories_train, histories_dev, N=20)
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
user_name = edited_pages.index[i]
print(user_name)
display_user_recs_comparison(user_name, recs, recs_subset, histories_train, histories_dev, N=20)
print("\n\n\n")
# +
index = list(range(len(edited_pages)))
np.random.shuffle(index)
for i in index[:10]:
print(edited_pages.index[i])
display_user_recs_comparison
wr.print_user_history(user=edited_pages.index[i],all_histories=clean_histories)
print("\n\n\n")
# -
sns.distplot(edited_pages,kde=False,bins=np.arange(0,2000,20))
# # Repetition analysis
import itertools
clean_histories.head()
clean_histories.iloc[:1000].values.tolist()
df = clean_histories
dict(zip(df.columns, range(len(df.columns))))
def identify_runs(df):
d = df.loc[:,['userid','pageid']].values.tolist()
return [(k, len(list(g))) for k,g in itertools.groupby(d)]
# %%time
runs = identify_runs(clean_histories)
# +
lens = np.array([r[1] for r in runs])
single_edits = np.sum(lens==1)
total_edits = len(clean_histories)
print("Percent of edits that are part of a run: %.1f%%" % (100*(1-(float(single_edits)/total_edits))))
print("Percent of edits that are repetitions: %.1f%%" % (100*(1-len(runs)/total_edits)))
|
[
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.sum",
"wikirecs.display_recs_with_history",
"wikirecs.print_user_history",
"recommenders.ImplicitCollaborativeRecommender",
"recommenders.MostRecentRecommender",
"recommenders.MostFrequentRecommender",
"matplotlib.pyplot.figure",
"numpy.arange",
"wikirecs.recall",
"matplotlib.pyplot.gca",
"wikirecs.get_recs_metrics",
"implicit.als.AlternatingLeastSquares",
"pandas.set_option",
"plotly.express.scatter",
"recommenders.InterleaveRecommender",
"pandas.DataFrame",
"pyarrow.feather.read_feather",
"matplotlib.rcParams.update",
"scipy.sparse.coo_matrix",
"matplotlib.pyplot.rc",
"pandas.concat",
"seaborn.set_theme",
"numpy.random.shuffle",
"matplotlib.pyplot.ylim",
"seaborn.scatterplot",
"numpy.median",
"matplotlib.pyplot.legend",
"pyarrow.feather.write_feather",
"wikirecs.get_resurface_discovery",
"umap.UMAP",
"wikirecs.save_pickle",
"recommenders.PopularityRecommender",
"wikirecs.recall_curve",
"itertools.groupby",
"matplotlib.pyplot.ylabel",
"wikirecs.load_pickle",
"recommenders.JaccardRecommender",
"os.listdir",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"logging.basicConfig",
"pull_edit_histories.get_edit_history",
"implicit.nearest_neighbours.BM25Recommender",
"matplotlib.pyplot.plot",
"recommenders.MyBM25Recommender",
"wikirecs.dataframe_set_subtract",
"scipy.sparse.csc_matrix",
"numpy.array",
"implicit.nearest_neighbours.bm25_weight",
"matplotlib.pyplot.xlabel",
"itables.javascript.load_datatables",
"logging.getLogger",
"wikirecs.prop_resurface"
] |
[((1071, 1088), 'itables.javascript.load_datatables', 'load_datatables', ([], {}), '()\n', (1086, 1088), False, 'from itables.javascript import load_datatables\n'), ((1109, 1147), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(100)'], {}), "('display.max_rows', 100)\n", (1122, 1147), True, 'import pandas as pd\n'), ((1148, 1186), 'pandas.set_option', 'pd.set_option', (['"""display.min_rows"""', '(100)'], {}), "('display.min_rows', 100)\n", (1161, 1186), True, 'import pandas as pd\n'), ((1207, 1228), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1226, 1228), False, 'import logging\n'), ((1356, 1395), 'os.listdir', 'os.listdir', (['"""edit_histories_2021-05-28"""'], {}), "('edit_histories_2021-05-28')\n", (1366, 1395), False, 'import os\n'), ((1539, 1582), 'pandas.concat', 'pd.concat', (['all_histories'], {'ignore_index': '(True)'}), '(all_histories, ignore_index=True)\n', (1548, 1582), True, 'import pandas as pd\n'), ((1584, 1656), 'pyarrow.feather.write_feather', 'feather.write_feather', (['all_histories', '"""all_histories_2021-05-28.feather"""'], {}), "(all_histories, 'all_histories_2021-05-28.feather')\n", (1605, 1656), False, 'from pyarrow import feather\n'), ((1683, 1739), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""all_histories_2021-05-28.feather"""'], {}), "('all_histories_2021-05-28.feather')\n", (1703, 1739), False, 'from pyarrow import feather\n'), ((1887, 1943), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""all_histories_2021-05-28.feather"""'], {}), "('all_histories_2021-05-28.feather')\n", (1907, 1943), False, 'from pyarrow import feather\n'), ((2782, 2809), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (2792, 2809), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2829), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2820, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2929), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of edits by user"""'], {}), "('Number of edits by user')\n", (2902, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2930, 2950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2941, 2950), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3027), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 200]'], {}), '([0, 200])\n', (3017, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of edits by user"""'], {}), "('Number of edits by user')\n", (3037, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3202), 'numpy.sum', 'np.sum', (['(edit_counts > thres)'], {}), '(edit_counts > thres)\n', (3181, 3202), True, 'import numpy as np\n'), ((3750, 3789), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1000)'], {}), "('display.max_rows', 1000)\n", (3763, 3789), True, 'import pandas as pd\n'), ((7057, 7136), 'pyarrow.feather.write_feather', 'feather.write_feather', (['clean_histories', '"""../clean_histories_2021-05-28.feather"""'], {}), "(clean_histories, '../clean_histories_2021-05-28.feather')\n", (7078, 7136), False, 'from pyarrow import feather\n'), ((7191, 7252), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../clean_histories_2021-05-28.feather"""'], {}), "('../clean_histories_2021-05-28.feather')\n", (7211, 7252), False, 'from pyarrow import feather\n'), ((8253, 8371), 'wikirecs.save_pickle', 'wr.save_pickle', (['(p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t)', '"""../lookup_tables_2021-05-28.pickle"""'], {}), "((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t),\n '../lookup_tables_2021-05-28.pickle')\n", (8267, 8371), True, 'import wikirecs as wr\n'), ((8369, 8443), 'wikirecs.save_pickle', 'wr.save_pickle', (['(userids, pageids)', '"""../users_and_pages_2021-05-28.pickle"""'], {}), "((userids, pageids), '../users_and_pages_2021-05-28.pickle')\n", (8383, 8443), True, 'import wikirecs as wr\n'), ((8542, 8594), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (8556, 8594), True, 'import wikirecs as wr\n'), ((8614, 8668), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../users_and_pages_2021-05-28.pickle"""'], {}), "('../users_and_pages_2021-05-28.pickle')\n", (8628, 8668), True, 'import wikirecs as wr\n'), ((8882, 8940), 'wikirecs.dataframe_set_subtract', 'wr.dataframe_set_subtract', (['clean_histories', 'histories_test'], {}), '(clean_histories, histories_test)\n', (8907, 8940), True, 'import wikirecs as wr\n'), ((9217, 9274), 'wikirecs.dataframe_set_subtract', 'wr.dataframe_set_subtract', (['histories_train', 'histories_dev'], {}), '(histories_train, histories_dev)\n', (9242, 9274), True, 'import wikirecs as wr\n'), ((9870, 9949), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_train', '"""../histories_train_2021-05-28.feather"""'], {}), "(histories_train, '../histories_train_2021-05-28.feather')\n", (9891, 9949), False, 'from pyarrow import feather\n'), ((9950, 10025), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_dev', '"""../histories_dev_2021-05-28.feather"""'], {}), "(histories_dev, '../histories_dev_2021-05-28.feather')\n", (9971, 10025), False, 'from pyarrow import feather\n'), ((10026, 10103), 'pyarrow.feather.write_feather', 'feather.write_feather', (['histories_test', '"""../histories_test_2021-05-28.feather"""'], {}), "(histories_test, '../histories_test_2021-05-28.feather')\n", (10047, 10103), False, 'from pyarrow import feather\n'), ((10148, 10206), 'wikirecs.get_resurface_discovery', 'wr.get_resurface_discovery', (['histories_train', 'histories_dev'], {}), '(histories_train, histories_dev)\n', (10174, 10206), True, 'import wikirecs as wr\n'), ((10499, 10607), 'wikirecs.save_pickle', 'wr.save_pickle', (['(resurface_userids, discovery_userids)', '"""../resurface_discovery_users_2021-05-28.pickle"""'], {}), "((resurface_userids, discovery_userids),\n '../resurface_discovery_users_2021-05-28.pickle')\n", (10513, 10607), True, 'import wikirecs as wr\n'), ((10943, 11066), 'pull_edit_histories.get_edit_history', 'get_edit_history', ([], {'user': '"""Thornstrom"""', 'latest_timestamp': '"""2021-05-28T22:02:09Z"""', 'earliest_timestamp': '"""2020-05-28T22:02:09Z"""'}), "(user='Thornstrom', latest_timestamp='2021-05-28T22:02:09Z',\n earliest_timestamp='2020-05-28T22:02:09Z')\n", (10959, 11066), False, 'from pull_edit_histories import get_edit_history\n'), ((11170, 11219), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': '"""Rama"""'}), "(all_histories, user='Rama')\n", (11191, 11219), True, 'import wikirecs as wr\n'), ((11221, 11270), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': '"""Meow"""'}), "(all_histories, user='Meow')\n", (11242, 11270), True, 'import wikirecs as wr\n'), ((11592, 11646), 'numpy.array', 'np.array', (['[p2i[p] for p in for_implicit.pageid.values]'], {}), '([p2i[p] for p in for_implicit.pageid.values])\n', (11600, 11646), True, 'import numpy as np\n'), ((11653, 11707), 'numpy.array', 'np.array', (['[u2i[u] for u in for_implicit.userid.values]'], {}), '([u2i[u] for u in for_implicit.userid.values])\n', (11661, 11707), True, 'import numpy as np\n'), ((11731, 11782), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(for_implicit.edits.values, (row, col))'], {}), '((for_implicit.edits.values, (row, col)))\n', (11741, 11782), False, 'from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix\n'), ((11803, 11834), 'scipy.sparse.csc_matrix', 'csc_matrix', (['implicit_matrix_coo'], {}), '(implicit_matrix_coo)\n', (11813, 11834), False, 'from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix\n'), ((11849, 11920), 'wikirecs.save_pickle', 'wr.save_pickle', (['implicit_matrix', '"""../implicit_matrix_2021-05-28.pickle"""'], {}), "(implicit_matrix, '../implicit_matrix_2021-05-28.pickle')\n", (11863, 11920), True, 'import wikirecs as wr\n'), ((11974, 12028), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (11988, 12028), True, 'import wikirecs as wr\n'), ((12726, 12780), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (12740, 12780), True, 'import wikirecs as wr\n'), ((12842, 12894), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (12856, 12894), True, 'import wikirecs as wr\n'), ((12910, 12954), 'implicit.nearest_neighbours.bm25_weight', 'bm25_weight', (['implicit_matrix'], {'K1': '(100)', 'B': '(0.25)'}), '(implicit_matrix, K1=100, B=0.25)\n', (12921, 12954), False, 'from implicit.nearest_neighbours import bm25_weight\n'), ((13044, 13137), 'implicit.als.AlternatingLeastSquares', 'implicit.als.AlternatingLeastSquares', ([], {'factors': 'num_factors', 'regularization': 'regularization'}), '(factors=num_factors, regularization=\n regularization)\n', (13080, 13137), False, 'import implicit\n'), ((13163, 13228), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', "('../als%d_bm25_model.pickle' % num_factors)"], {}), "(model, '../als%d_bm25_model.pickle' % num_factors)\n", (13177, 13228), True, 'import wikirecs as wr\n'), ((13237, 13293), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als200_bm25_model_2021-05-28.pickle"""'], {}), "('../als200_bm25_model_2021-05-28.pickle')\n", (13251, 13293), True, 'import wikirecs as wr\n'), ((13741, 13789), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_grid_search.pickle"""'], {}), "('../implicit_grid_search.pickle')\n", (13755, 13789), True, 'import wikirecs as wr\n'), ((13791, 13824), 'pandas.DataFrame', 'pd.DataFrame', (['grid_search_results'], {}), '(grid_search_results)\n', (13803, 13824), True, 'import pandas as pd\n'), ((14076, 14129), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_grid_search_bm25.pickle"""'], {}), "('../implicit_grid_search_bm25.pickle')\n", (14090, 14129), True, 'import wikirecs as wr\n'), ((14462, 14502), 'implicit.nearest_neighbours.bm25_weight', 'bm25_weight', (['implicit_matrix'], {'K1': '(20)', 'B': '(1)'}), '(implicit_matrix, K1=20, B=1)\n', (14473, 14502), False, 'from implicit.nearest_neighbours import bm25_weight\n'), ((14747, 14769), 'implicit.nearest_neighbours.BM25Recommender', 'BM25Recommender', (['K1', 'B'], {}), '(K1, B)\n', (14762, 14769), False, 'from implicit.nearest_neighbours import BM25Recommender\n'), ((14798, 14851), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', '"""../bm25_model_2021-05-28.pkl"""'], {}), "(model, '../bm25_model_2021-05-28.pkl')\n", (14812, 14851), True, 'import wikirecs as wr\n'), ((15881, 15922), 'wikirecs.save_pickle', 'wr.save_pickle', (['model', '"""b25_model.pickle"""'], {}), "(model, 'b25_model.pickle')\n", (15895, 15922), True, 'import wikirecs as wr\n'), ((15932, 15966), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""b25_model.pickle"""'], {}), "('b25_model.pickle')\n", (15946, 15966), True, 'import wikirecs as wr\n'), ((16231, 16279), 'wikirecs.print_user_history', 'wr.print_user_history', (['clean_histories'], {'userid': 'u'}), '(clean_histories, userid=u)\n', (16252, 16279), True, 'import wikirecs as wr\n'), ((16568, 16608), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als150_model.pickle"""'], {}), "('../als150_model.pickle')\n", (16582, 16608), True, 'import wikirecs as wr\n'), ((17024, 17052), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (17034, 17052), True, 'import matplotlib.pyplot as plt\n'), ((17052, 17099), 'matplotlib.pyplot.plot', 'plt.plot', (['embedding[:, 0]', 'embedding[:, 1]', '"""."""'], {}), "(embedding[:, 0], embedding[:, 1], '.')\n", (17060, 17099), True, 'import matplotlib.pyplot as plt\n'), ((18470, 18582), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'emb_df', 'x': '"""dim1"""', 'y': '"""dim2"""', 'hover_name': '"""title"""', 'color': 'key', 'hover_data': "['edit_count']"}), "(data_frame=emb_df, x='dim1', y='dim2', hover_name='title', color\n =key, hover_data=['edit_count'])\n", (18480, 18582), True, 'import plotly.express as px\n'), ((19111, 19236), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'emb_df', 'x': '"""dim1"""', 'y': '"""dim2"""', 'hover_name': '"""title"""', 'color': '"""log_edit_count"""', 'hover_data': "['edit_count']"}), "(data_frame=emb_df, x='dim1', y='dim2', hover_name='title', color\n ='log_edit_count', hover_data=['edit_count'])\n", (19121, 19236), True, 'import plotly.express as px\n'), ((19516, 19577), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_train_2021-05-28.feather"""'], {}), "('../histories_train_2021-05-28.feather')\n", (19536, 19577), False, 'from pyarrow import feather\n'), ((19595, 19655), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_test_2021-05-28.feather"""'], {}), "('../histories_test_2021-05-28.feather')\n", (19615, 19655), False, 'from pyarrow import feather\n'), ((19672, 19731), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_dev_2021-05-28.feather"""'], {}), "('../histories_dev_2021-05-28.feather')\n", (19692, 19731), False, 'from pyarrow import feather\n'), ((19751, 19805), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../implicit_matrix_2021-05-28.pickle"""'], {}), "('../implicit_matrix_2021-05-28.pickle')\n", (19765, 19805), True, 'import wikirecs as wr\n'), ((19867, 19919), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../lookup_tables_2021-05-28.pickle"""'], {}), "('../lookup_tables_2021-05-28.pickle')\n", (19881, 19919), True, 'import wikirecs as wr\n'), ((19940, 19994), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../users_and_pages_2021-05-28.pickle"""'], {}), "('../users_and_pages_2021-05-28.pickle')\n", (19954, 19994), True, 'import wikirecs as wr\n'), ((20037, 20101), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../resurface_discovery_users_2021-05-28.pickle"""'], {}), "('../resurface_discovery_users_2021-05-28.pickle')\n", (20051, 20101), True, 'import wikirecs as wr\n'), ((20123, 20258), 'wikirecs.display_recs_with_history', 'wr.display_recs_with_history', (['recs', 'userids[:100]', 'histories_test', 'histories_train', 'p2t', 'u2n'], {'recs_to_display': '(5)', 'hist_to_display': '(10)'}), '(recs, userids[:100], histories_test,\n histories_train, p2t, u2n, recs_to_display=5, hist_to_display=10)\n', (20151, 20258), True, 'import wikirecs as wr\n'), ((20360, 20411), 'recommenders.PopularityRecommender', 'recommenders.PopularityRecommender', (['histories_train'], {}), '(histories_train)\n', (20394, 20411), False, 'import recommenders\n'), ((20451, 20507), 'wikirecs.save_pickle', 'wr.save_pickle', (['precs', "('../' + rec_name + '_recs.pickle')"], {}), "(precs, '../' + rec_name + '_recs.pickle')\n", (20465, 20507), True, 'import wikirecs as wr\n'), ((20534, 20647), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'precs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, precs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (20553, 20647), True, 'import wikirecs as wr\n'), ((20727, 20778), 'recommenders.MostRecentRecommender', 'recommenders.MostRecentRecommender', (['histories_train'], {}), '(histories_train)\n', (20761, 20778), False, 'import recommenders\n'), ((20868, 20924), 'wikirecs.save_pickle', 'wr.save_pickle', (['rrecs', "('../' + rec_name + '_recs.pickle')"], {}), "(rrecs, '../' + rec_name + '_recs.pickle')\n", (20882, 20924), True, 'import wikirecs as wr\n'), ((20982, 21095), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'rrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, rrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21001, 21095), True, 'import wikirecs as wr\n'), ((21188, 21241), 'recommenders.MostFrequentRecommender', 'recommenders.MostFrequentRecommender', (['histories_train'], {}), '(histories_train)\n', (21224, 21241), False, 'import recommenders\n'), ((21333, 21389), 'wikirecs.save_pickle', 'wr.save_pickle', (['frecs', "('../' + rec_name + '_recs.pickle')"], {}), "(frecs, '../' + rec_name + '_recs.pickle')\n", (21347, 21389), True, 'import wikirecs as wr\n'), ((21411, 21524), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'frecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, frecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21430, 21524), True, 'import wikirecs as wr\n'), ((21577, 21631), 'recommenders.MyBM25Recommender', 'recommenders.MyBM25Recommender', (['model', 'implicit_matrix'], {}), '(model, implicit_matrix)\n', (21607, 21631), False, 'import recommenders\n'), ((21751, 21807), 'wikirecs.save_pickle', 'wr.save_pickle', (['brecs', "('../' + rec_name + '_recs.pickle')"], {}), "(brecs, '../' + rec_name + '_recs.pickle')\n", (21765, 21807), True, 'import wikirecs as wr\n'), ((21865, 21978), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'brecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, brecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (21884, 21978), True, 'import wikirecs as wr\n'), ((22170, 22235), 'wikirecs.save_pickle', 'wr.save_pickle', (['brecs_filtered', "('../' + rec_name + '_recs.pickle')"], {}), "(brecs_filtered, '../' + rec_name + '_recs.pickle')\n", (22184, 22235), True, 'import wikirecs as wr\n'), ((22257, 22386), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', "recs['bm25_filtered']", 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), "(histories_dev, recs['bm25_filtered'], K,\n discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)\n", (22276, 22386), True, 'import wikirecs as wr\n'), ((22427, 22556), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', "recs['bm25_filtered']", 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), "(histories_dev, recs['bm25_filtered'], K,\n discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)\n", (22446, 22556), True, 'import wikirecs as wr\n'), ((22632, 22688), 'wikirecs.load_pickle', 'wr.load_pickle', (['"""../als200_bm25_model_2021-05-28.pickle"""'], {}), "('../als200_bm25_model_2021-05-28.pickle')\n", (22646, 22688), True, 'import wikirecs as wr\n'), ((22888, 22944), 'wikirecs.save_pickle', 'wr.save_pickle', (['irecs', "('../' + rec_name + '_recs.pickle')"], {}), "(irecs, '../' + rec_name + '_recs.pickle')\n", (22902, 22944), True, 'import wikirecs as wr\n'), ((23476, 23541), 'wikirecs.save_pickle', 'wr.save_pickle', (['irecs_filtered', "('../' + rec_name + '_recs.pickle')"], {}), "(irecs_filtered, '../' + rec_name + '_recs.pickle')\n", (23490, 23541), True, 'import wikirecs as wr\n'), ((23628, 23679), 'recommenders.MostRecentRecommender', 'recommenders.MostRecentRecommender', (['histories_train'], {}), '(histories_train)\n', (23662, 23679), False, 'import recommenders\n'), ((23771, 23886), 'recommenders.JaccardRecommender', 'recommenders.JaccardRecommender', (['implicit_matrix'], {'p2i': 'p2i', 't2i': 't2i', 'i2t': 'i2t', 'i2p': 'i2p', 'n2i': 'n2i', 'u2i': 'u2i', 'i2u': 'i2u'}), '(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t,\n i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)\n', (23802, 23886), False, 'import recommenders\n'), ((24156, 24202), 'wikirecs.save_pickle', 'wr.save_pickle', (['jrecs', '"""jaccard-1_recs.pickle"""'], {}), "(jrecs, 'jaccard-1_recs.pickle')\n", (24170, 24202), True, 'import wikirecs as wr\n'), ((24244, 24357), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'jrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, jrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (24263, 24357), True, 'import wikirecs as wr\n'), ((24378, 24513), 'wikirecs.display_recs_with_history', 'wr.display_recs_with_history', (['jrecs', 'userids[:30]', 'histories_test', 'histories_train', 'p2t', 'u2n'], {'recs_to_display': '(5)', 'hist_to_display': '(10)'}), '(jrecs, userids[:30], histories_test,\n histories_train, p2t, u2n, recs_to_display=5, hist_to_display=10)\n', (24406, 24513), True, 'import wikirecs as wr\n'), ((24586, 24701), 'recommenders.JaccardRecommender', 'recommenders.JaccardRecommender', (['implicit_matrix'], {'p2i': 'p2i', 't2i': 't2i', 'i2t': 'i2t', 'i2p': 'i2p', 'n2i': 'n2i', 'u2i': 'u2i', 'i2u': 'i2u'}), '(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t,\n i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)\n', (24617, 24701), False, 'import recommenders\n'), ((25535, 25571), 'recommenders.InterleaveRecommender', 'recommenders.InterleaveRecommender', ([], {}), '()\n', (25569, 25571), False, 'import recommenders\n'), ((25648, 25706), 'wikirecs.save_pickle', 'wr.save_pickle', (['intrecs', "('../' + rec_name + '_recs.pickle')"], {}), "(intrecs, '../' + rec_name + '_recs.pickle')\n", (25662, 25706), True, 'import wikirecs as wr\n'), ((25731, 25846), 'wikirecs.get_recs_metrics', 'wr.get_recs_metrics', (['histories_dev', 'intrecs', 'K', 'discovery_userids', 'resurface_userids', 'implicit_matrix', 'i2p', 'u2i'], {}), '(histories_dev, intrecs, K, discovery_userids,\n resurface_userids, implicit_matrix, i2p, u2i)\n', (25750, 25846), True, 'import wikirecs as wr\n'), ((31005, 31101), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'results_df', 'x': '"""ndcg_discover"""', 'y': '"""ndcg_resurface"""', 'hover_name': '"""index"""'}), "(data_frame=results_df, x='ndcg_discover', y='ndcg_resurface',\n hover_name='index')\n", (31015, 31101), True, 'import plotly.express as px\n'), ((31206, 31306), 'plotly.express.scatter', 'px.scatter', ([], {'data_frame': 'results_df', 'x': '"""recall_discover"""', 'y': '"""recall_resurface"""', 'hover_name': '"""index"""'}), "(data_frame=results_df, x='recall_discover', y='recall_resurface',\n hover_name='index')\n", (31216, 31306), True, 'import plotly.express as px\n'), ((31635, 31666), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (31648, 31666), True, 'import seaborn as sns\n'), ((31667, 31767), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 48, 'figure.figsize': (8, 5), 'legend.edgecolor': 'k'}"], {}), "({'font.size': 48, 'figure.figsize': (8, 5),\n 'legend.edgecolor': 'k'})\n", (31693, 31767), False, 'import matplotlib\n'), ((31763, 31790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (31773, 31790), True, 'import matplotlib.pyplot as plt\n'), ((32073, 32093), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '""":k"""'], {}), "(x, y, ':k')\n", (32081, 32093), True, 'import matplotlib.pyplot as plt\n'), ((32154, 32182), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(3)'}), "('xtick', labelsize=3)\n", (32160, 32182), True, 'import matplotlib.pyplot as plt\n'), ((32266, 32295), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (32279, 32295), False, 'import matplotlib\n'), ((32297, 32317), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '""":k"""'], {}), "(x, y, ':k')\n", (32305, 32317), True, 'import matplotlib.pyplot as plt\n'), ((32317, 32351), 'matplotlib.pyplot.plot', 'plt.plot', (['A', 'B', '"""."""'], {'MarkerSize': '(15)'}), "(A, B, '.', MarkerSize=15)\n", (32325, 32351), True, 'import matplotlib.pyplot as plt\n'), ((32709, 32759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall@20 discovery (%)"""'], {'fontsize': '(20)'}), "('Recall@20 discovery (%)', fontsize=20)\n", (32719, 32759), True, 'import matplotlib.pyplot as plt\n'), ((32759, 32809), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recall@20 resurface (%)"""'], {'fontsize': '(20)'}), "('Recall@20 resurface (%)', fontsize=20)\n", (32769, 32809), True, 'import matplotlib.pyplot as plt\n'), ((32809, 32825), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3]'], {}), '([0, 3])\n', (32817, 32825), True, 'import matplotlib.pyplot as plt\n'), ((32825, 32843), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 85]'], {}), '([-2, 85])\n', (32833, 32843), True, 'import matplotlib.pyplot as plt\n'), ((32850, 32859), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32857, 32859), True, 'import matplotlib.pyplot as plt\n'), ((33146, 33205), 'pyarrow.feather.read_feather', 'feather.read_feather', (['"""../histories_dev_2021-05-28.feather"""'], {}), "('../histories_dev_2021-05-28.feather')\n", (33166, 33205), False, 'from pyarrow import feather\n'), ((33207, 33235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33217, 33235), True, 'import matplotlib.pyplot as plt\n'), ((33396, 33425), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33406, 33425), True, 'import matplotlib.pyplot as plt\n'), ((33427, 33455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33437, 33455), True, 'import matplotlib.pyplot as plt\n'), ((33605, 33634), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33615, 33634), True, 'import matplotlib.pyplot as plt\n'), ((33636, 33664), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (33646, 33664), True, 'import matplotlib.pyplot as plt\n'), ((33814, 33843), 'matplotlib.pyplot.legend', 'plt.legend', (['recommender_names'], {}), '(recommender_names)\n', (33824, 33843), True, 'import matplotlib.pyplot as plt\n'), ((33880, 33911), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (33893, 33911), True, 'import seaborn as sns\n'), ((33912, 34012), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 18, 'figure.figsize': (8, 5), 'legend.edgecolor': 'k'}"], {}), "({'font.size': 18, 'figure.figsize': (8, 5),\n 'legend.edgecolor': 'k'})\n", (33938, 34012), False, 'import matplotlib\n'), ((34006, 34033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (34016, 34033), True, 'import matplotlib.pyplot as plt\n'), ((34208, 34306), 'matplotlib.pyplot.legend', 'plt.legend', (["['ALS', 'BM25']"], {'title': '"""Algorithm"""', 'fontsize': '(16)', 'title_fontsize': '(16)', 'facecolor': '"""w"""'}), "(['ALS', 'BM25'], title='Algorithm', fontsize=16, title_fontsize=\n 16, facecolor='w')\n", (34218, 34306), True, 'import matplotlib.pyplot as plt\n'), ((34301, 34330), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""@N"""'], {'fontsize': '(20)'}), "('@N', fontsize=20)\n", (34311, 34330), True, 'import matplotlib.pyplot as plt\n'), ((34330, 34377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Discovery recall (%)"""'], {'fontsize': '(20)'}), "('Discovery recall (%)', fontsize=20)\n", (34340, 34377), True, 'import matplotlib.pyplot as plt\n'), ((37355, 37424), 'recommenders.ImplicitCollaborativeRecommender', 'recommenders.ImplicitCollaborativeRecommender', (['model', 'implicit_matrix'], {}), '(model, implicit_matrix)\n', (37400, 37424), False, 'import recommenders\n'), ((37948, 37972), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (37965, 37972), True, 'import numpy as np\n'), ((38219, 38243), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (38236, 38243), True, 'import numpy as np\n'), ((38888, 38918), 'numpy.array', 'np.array', (['[r[1] for r in runs]'], {}), '([r[1] for r in runs])\n', (38896, 38918), True, 'import numpy as np\n'), ((38935, 38952), 'numpy.sum', 'np.sum', (['(lens == 1)'], {}), '(lens == 1)\n', (38941, 38952), True, 'import numpy as np\n'), ((12242, 12290), 'numpy.sum', 'np.sum', (['(implicit_matrix[:, veditors] > 0)'], {'axis': '(1)'}), '(implicit_matrix[:, veditors] > 0, axis=1)\n', (12248, 12290), True, 'import numpy as np\n'), ((12626, 12677), 'wikirecs.print_user_history', 'wr.print_user_history', (['all_histories'], {'user': 'i2n[ved]'}), '(all_histories, user=i2n[ved])\n', (12647, 12677), True, 'import wikirecs as wr\n'), ((28147, 28168), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (28159, 28168), True, 'import pandas as pd\n'), ((28530, 28584), 'seaborn.scatterplot', 'sns.scatterplot', (['x', 'y'], {'data': 'data', 'size': '(8)', 'legend': '(False)'}), '(x, y, data=data, size=8, legend=False)\n', (28545, 28584), True, 'import seaborn as sns\n'), ((28887, 28903), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (28896, 28903), True, 'import matplotlib.pyplot as plt\n'), ((28908, 28926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (28918, 28926), True, 'import matplotlib.pyplot as plt\n'), ((28931, 28949), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (28941, 28949), True, 'import matplotlib.pyplot as plt\n'), ((33030, 33076), 'wikirecs.load_pickle', 'wr.load_pickle', (["('../' + rname + '_recs.pickle')"], {}), "('../' + rname + '_recs.pickle')\n", (33044, 33076), True, 'import wikirecs as wr\n'), ((33286, 33333), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)'], {}), '(histories_dev, recs[rname], 20)\n', (33301, 33333), True, 'import wikirecs as wr\n'), ((33368, 33396), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33376, 33396), True, 'import matplotlib.pyplot as plt\n'), ((33506, 33572), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'discovery_userids'], {}), '(histories_dev, recs[rname], 20, discovery_userids)\n', (33521, 33572), True, 'import wikirecs as wr\n'), ((33577, 33605), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33585, 33605), True, 'import matplotlib.pyplot as plt\n'), ((33715, 33781), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'resurface_userids'], {}), '(histories_dev, recs[rname], 20, resurface_userids)\n', (33730, 33781), True, 'import wikirecs as wr\n'), ((33786, 33814), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_curve', '""".-"""'], {}), "(recall_curve, '.-')\n", (33794, 33814), True, 'import matplotlib.pyplot as plt\n'), ((34081, 34147), 'wikirecs.recall_curve', 'wr.recall_curve', (['histories_dev', 'recs[rname]', '(20)', 'discovery_userids'], {}), '(histories_dev, recs[rname], 20, discovery_userids)\n', (34096, 34147), True, 'import wikirecs as wr\n'), ((34392, 34411), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (34401, 34411), True, 'import numpy as np\n'), ((35535, 35637), 'pandas.DataFrame', 'pd.DataFrame', (['{rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in\n recs_subset}'], {}), '({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for\n rec_name in recs_subset})\n', (35547, 35637), True, 'import pandas as pd\n'), ((37028, 37141), 'pandas.DataFrame', 'pd.DataFrame', (["{rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][:N] for\n rec_name in recs_subset}"], {}), "({rec_name: [p2t[r] for r in recs[rec_name][n2u['HenryXVII']]][\n :N] for rec_name in recs_subset})\n", (37040, 37141), True, 'import pandas as pd\n'), ((38336, 38421), 'wikirecs.print_user_history', 'wr.print_user_history', ([], {'user': 'edited_pages.index[i]', 'all_histories': 'clean_histories'}), '(user=edited_pages.index[i], all_histories=clean_histories\n )\n', (38357, 38421), True, 'import wikirecs as wr\n'), ((1229, 1248), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1246, 1248), False, 'import logging\n'), ((2517, 2571), 'pandas.concat', 'pd.concat', (['[all_histories, oneuser]'], {'ignore_index': '(True)'}), '([all_histories, oneuser], ignore_index=True)\n', (2526, 2571), True, 'import pandas as pd\n'), ((2868, 2892), 'numpy.arange', 'np.arange', (['(0)', '(20000)', '(200)'], {}), '(0, 20000, 200)\n', (2877, 2892), True, 'import numpy as np\n'), ((2989, 3009), 'numpy.arange', 'np.arange', (['(0)', '(200)', '(1)'], {}), '(0, 200, 1)\n', (2998, 3009), True, 'import numpy as np\n'), ((3128, 3150), 'numpy.median', 'np.median', (['edit_counts'], {}), '(edit_counts)\n', (3137, 3150), True, 'import numpy as np\n'), ((6531, 6547), 'numpy.array', 'np.array', (['is_bot'], {}), '(is_bot)\n', (6539, 6547), True, 'import numpy as np\n'), ((11130, 11151), 'pandas.DataFrame', 'pd.DataFrame', (['oneuser'], {}), '(oneuser)\n', (11142, 11151), True, 'import pandas as pd\n'), ((12311, 12359), 'numpy.sum', 'np.sum', (['(implicit_matrix[:, veditors] > 0)'], {'axis': '(1)'}), '(implicit_matrix[:, veditors] > 0, axis=1)\n', (12317, 12359), True, 'import numpy as np\n'), ((14600, 14620), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (14609, 14620), True, 'import numpy as np\n'), ((14686, 14706), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(1)'], {}), '(0, 100, 1)\n', (14695, 14706), True, 'import numpy as np\n'), ((16966, 16977), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (16975, 16977), False, 'import umap\n'), ((17210, 17253), 'numpy.sum', 'np.sum', (['implicit_matrix[indices, :]'], {'axis': '(1)'}), '(implicit_matrix[indices, :], axis=1)\n', (17216, 17253), True, 'import numpy as np\n'), ((17891, 17940), 'numpy.sum', 'np.sum', (['implicit_matrix[actor_indices, :]'], {'axis': '(1)'}), '(implicit_matrix[actor_indices, :], axis=1)\n', (17897, 17940), True, 'import numpy as np\n'), ((18053, 18064), 'umap.UMAP', 'umap.UMAP', ([], {}), '()\n', (18062, 18064), False, 'import umap\n'), ((23547, 23568), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (23559, 23568), True, 'import pandas as pd\n'), ((34410, 34429), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (34419, 34429), True, 'import numpy as np\n'), ((38482, 38504), 'numpy.arange', 'np.arange', (['(0)', '(2000)', '(20)'], {}), '(0, 2000, 20)\n', (38491, 38504), True, 'import numpy as np\n'), ((5531, 5559), 'numpy.sum', 'np.sum', (['is_popular_page_edit'], {}), '(is_popular_page_edit)\n', (5537, 5559), True, 'import numpy as np\n'), ((6592, 6609), 'numpy.sum', 'np.sum', (['keep_user'], {}), '(keep_user)\n', (6598, 6609), True, 'import numpy as np\n'), ((16687, 16730), 'numpy.sum', 'np.sum', (['implicit_matrix[nonzero, :]'], {'axis': '(1)'}), '(implicit_matrix[nonzero, :], axis=1)\n', (16693, 16730), True, 'import numpy as np\n'), ((17303, 17346), 'numpy.sum', 'np.sum', (['implicit_matrix[indices, :]'], {'axis': '(1)'}), '(implicit_matrix[indices, :], axis=1)\n', (17309, 17346), True, 'import numpy as np\n'), ((17990, 18039), 'numpy.sum', 'np.sum', (['implicit_matrix[actor_indices, :]'], {'axis': '(1)'}), '(implicit_matrix[actor_indices, :], axis=1)\n', (17996, 18039), True, 'import numpy as np\n'), ((25112, 25166), 'wikirecs.prop_resurface', 'wr.prop_resurface', (['jrecs', 'K', 'implicit_matrix', 'i2p', 'u2i'], {}), '(jrecs, K, implicit_matrix, i2p, u2i)\n', (25129, 25166), True, 'import wikirecs as wr\n'), ((32446, 32455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32453, 32455), True, 'import matplotlib.pyplot as plt\n'), ((32562, 32571), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32569, 32571), True, 'import matplotlib.pyplot as plt\n'), ((32641, 32650), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32648, 32650), True, 'import matplotlib.pyplot as plt\n'), ((34161, 34183), 'numpy.array', 'np.array', (['recall_curve'], {}), '(recall_curve)\n', (34169, 34183), True, 'import numpy as np\n'), ((34482, 34491), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34489, 34491), True, 'import matplotlib.pyplot as plt\n'), ((34561, 34570), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34568, 34570), True, 'import matplotlib.pyplot as plt\n'), ((38805, 38825), 'itertools.groupby', 'itertools.groupby', (['d'], {}), '(d)\n', (38822, 38825), False, 'import itertools\n'), ((2459, 2480), 'pandas.DataFrame', 'pd.DataFrame', (['oneuser'], {}), '(oneuser)\n', (2471, 2480), True, 'import pandas as pd\n'), ((4596, 4629), 'itertools.groupby', 'itertools.groupby', (['d'], {'key': 'keyfunc'}), '(d, key=keyfunc)\n', (4613, 4629), False, 'import itertools\n'), ((25035, 25070), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {}), '(histories_test, jrecs, K)\n', (25044, 25070), True, 'import wikirecs as wr\n'), ((25219, 25287), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {'userid_subset': 'discovery_userids'}), '(histories_test, jrecs, K, userid_subset=discovery_userids)\n', (25228, 25287), True, 'import wikirecs as wr\n'), ((25340, 25408), 'wikirecs.recall', 'wr.recall', (['histories_test', 'jrecs', 'K'], {'userid_subset': 'resurface_userids'}), '(histories_test, jrecs, K, userid_subset=resurface_userids)\n', (25349, 25408), True, 'import wikirecs as wr\n'), ((32483, 32500), 'numpy.array', 'np.array', (['xyz[1:]'], {}), '(xyz[1:])\n', (32491, 32500), True, 'import numpy as np\n'), ((5652, 5680), 'numpy.sum', 'np.sum', (['is_popular_page_edit'], {}), '(is_popular_page_edit)\n', (5658, 5680), True, 'import numpy as np\n'), ((6648, 6665), 'numpy.sum', 'np.sum', (['keep_user'], {}), '(keep_user)\n', (6654, 6665), True, 'import numpy as np\n')]
|
import random
import os.path
import pygame
import sys
from pygame.locals import *
WIDTH = 800
HEIGHT = 640
FPS = 60
POWERUP_TIME = 4000
RELOAD = 300
NUMSTARS = 30
TYPING_SPEED = 300
PLAYER_MAX_HEALTH = 100
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
YELLOW = (255, 211, 0)
LIGHT_GREEN = (185, 235, 98)
FONT = 'MyFont.ttf'
pygame.mixer.pre_init(44100, -16, 1, 512) # Decreasing the size of the buffer will reduce the latency
pygame.mixer.init() # handles sound
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Save The Galaxy')
clock = pygame.time.Clock()
if hasattr(sys, '_MEIPASS'):
main_dir = sys._MEIPASS
else: main_dir = os.path.split(os.path.abspath(__file__))[0] + '\\data'
textfile_dir = os.path.split(os.path.abspath(__file__))[0]
FONT = main_dir + '\\' + FONT
def loadImage(file):
file = os.path.join(main_dir, file)
img = pygame.image.load(file)
return img.convert_alpha()
iconImg = pygame.transform.scale(loadImage('icon.png'), (30, 30))
pygame.display.set_icon(iconImg)
loadingScreenImg = pygame.transform.scale(loadImage('loadingscreen.png'), (WIDTH, HEIGHT))
loadingScreenImgRect = loadingScreenImg.get_rect()
screen.blit(loadingScreenImg, loadingScreenImgRect)
pygame.display.update()
def loadSound(file):
file = os.path.join(main_dir, file)
sound = pygame.mixer.Sound(file)
return sound
def printText(surface, text, size, x, y, color, center = 0):
font = pygame.font.Font(FONT, size)
font.set_bold(True)
textSurface = font.render(text, True, color)
text_rect = textSurface.get_rect()
if center == 0:
text_rect.bottomleft = (x, y)
else:
text_rect.center = center
surface.blit(textSurface, text_rect)
def slowType(s, y):
global TYPING_SPEED
typeFPS = 60
k = len(s)
i = 0
x = 30
lastLetter = pygame.time.get_ticks()
while i < k:
clock.tick(typeFPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_KP_ENTER or event.key == K_ESCAPE:
typeFPS = 0
if (pygame.time.get_ticks() - lastLetter) > (random.random()*TYPING_SPEED):
printText(screen, s[i], 16, x, y, YELLOW)
keyPress_sound.play()
pygame.display.update()
x += 16
i += 1
lastLetter = pygame.time.get_ticks()
def showStory():
screen.blit(storyImg, storyImgRect)
pygame.display.update()
story_music.play(-1)
slowType('GREETINGS BRAVE WARRIOR,', 20)
slowType('YOUR GALAXY IS IN GREAT DANGER', 40)
slowType('OF RUTHLESS ALIEN INVASION', 60)
slowType('YOU HAVE BEEN CHOSEN', 80)
slowType('TO FACE AGAINST THIS TYRANNY', 100)
slowType('YOU GOT MOST ADVANCED SPACE SHIP', 120)
slowType('YOU HAVE ASSIGNMENT TO DESTROY ENEMY ARMY', 140)
slowType('AND DEFEAT CAPTAIN, GENERAL AND LEADER.', 160)
slowType('IF YOU ACCOMPLISH THIS MISSION SUCCESSFULLY,', 180)
slowType('WHOLE GALAXY WILL BE ETERNALLY GRATEFUL AND', 200)
slowType('MAY THE FORCE ALWAYS BE ON YOUR SIDE', 220)
slowType('PRESS ANY KEY TO CONTINUE...', 260)
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
story_music.stop()
showGameStartScreen()
def drawHealthBar(surface, x, y, health, healthColor, maxhealth, barLength):
if health < 0:
health = 0
barHeight = 25
fill = (health / maxhealth) * barLength
outlineRect = pygame.Rect(x, y, barLength, barHeight)
fillRect = pygame.Rect(x, y, fill, barHeight)
pygame.draw.rect(surface, healthColor, fillRect)
pygame.draw.rect(surface, WHITE, outlineRect, 2)
def drawLives(surface, x, y, lives, img):
for i in range(lives):
imgRect = img.get_rect()
imgRect.x = x + 35*i
imgRect.y = y
surface.blit(img, imgRect)
class Player(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = playerImg
self.rect = self.image.get_rect()
self.radius = 22
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.speedx = 5
self.speedy = 3
self.lives = 3
self.health = PLAYER_MAX_HEALTH
self.hidden = False
self.hide_timer = pygame.time.get_ticks()
self.immune = False
self.immune_timer = pygame.time.get_ticks()
self.powerLvl = 1
self.power_timer = pygame.time.get_ticks()
self.shoot_timer = pygame.time.get_ticks()
self.score = 0
def update(self):
if self.immune:
self.image = playerImg_immune
else:
self.image = playerImg
if player.lives < 1:
pygame.mixer.music.stop()
boss_fight_music.stop()
pygame.mixer.music.play(-1)
showGameOverScreen()
if self.powerLvl > 1:
if pygame.time.get_ticks() - self.power_timer > POWERUP_TIME:
self.powerLvl = 1
self.power_timer = pygame.time.get_ticks()
if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1200:
self.hidden = False
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.immune = True
self.immune_timer = pygame.time.get_ticks()
if self.immune and pygame.time.get_ticks() - self.immune_timer > 1500:
self.immune = False
keystate = pygame.key.get_pressed()
if keystate[K_LEFT]:
self.rect.x -= self.speedx
if keystate[K_RIGHT]:
self.rect.x += self.speedx
if keystate[K_UP]:
self.rect.y -= self.speedy
if keystate[K_DOWN]:
self.rect.y += self.speedy
if self.rect.right > WIDTH + 20:
self.rect.right = WIDTH + 20
if self.rect.left < -20 and self.rect.left > -200:
self.rect.left = -20
if self.rect.top <= 0 and self.rect.top > -200:
self.rect.top = 0
if self.rect.bottom >= HEIGHT - 30:
self.rect.bottom = HEIGHT - 30
def shoot(self):
if not self.hidden:
self.shoot_timer = pygame.time.get_ticks()
if self.powerLvl == 1:
bullet = Bullet(self.rect.centerx, self.rect.top)
allSprites.add(bullet)
bullets.add(bullet)
shoot_sound.play()
elif self.powerLvl == 2:
bullet1 = Bullet(self.rect.left+5, self.rect.centery)
bullet2 = Bullet(self.rect.right-5, self.rect.centery)
allSprites.add(bullet1, bullet2)
bullets.add(bullet1, bullet2)
shoot_sound.play()
else:
bullet = Bullet(self.rect.centerx, self.rect.top)
bullet1 = Bullet(self.rect.left + 5, self.rect.centery)
bullet2 = Bullet(self.rect.right - 5, self.rect.centery)
allSprites.add(bullet, bullet1, bullet2)
bullets.add(bullet, bullet1, bullet2)
shoot_sound.play()
def hide(self):
self.hidden = True
self.hide_timer = pygame.time.get_ticks()
self.rect.center = (-500, -500)
def powerup(self):
self.powerLvl += 1
self.power_timer = pygame.time.get_ticks()
def reset(self):
self.rect.bottom = HEIGHT - 30
self.rect.centerx = WIDTH / 2
self.lives = 3
self.health = PLAYER_MAX_HEALTH
self.hidden = False
self.powerLvl = 1
self.score = 0
class Alien(pygame.sprite.Sprite):
def __init__(self, x, y, img1, img2, smartShoot, fly):
pygame.sprite.Sprite.__init__(self)
self.img1 = img1
self.img2 = img2
self.image = self.img1
self.rect = self.image.get_rect()
self.radius = 20
self.rect.x = x
self.rect.y = y
self.speedy = 0
self.speedx = random.randrange(1, 3)
self.direction = 1
self.lastUpdate = pygame.time.get_ticks()
self.lastBomb = pygame.time.get_ticks()
self.smartShoot = smartShoot
self.canFly = fly
self.fly = False
self.fly_timer = pygame.time.get_ticks()
self.starty = self.rect.y
self.hitbottom = False
self.flyTime = random.randrange(5000, 30000)
def move(self, direction, y = 0):
if self.rect.y < self.starty:
self.rect.y = self.starty
self.fly = False
if y == 0:
self.rect.x += self.speedx * self.direction
else:
self.rect.y += 4 * direction
if self.rect.bottom > player.rect.bottom:
self.rect.bottom = player.rect.bottom
self.hitbottom = True
if self.rect.y == self.starty:
self.fly = False
alliens.remove(self)
hits = pygame.sprite.spritecollide(self, alliens, False)
if hits:
self.direction *= -1
alliens.add(self)
def update(self):
now = pygame.time.get_ticks()
if now - self.lastUpdate > 80:
self.lastUpdate = now
if self.image == self.img1:
self.image = self.img2
else:
self.image = self.img1
x = self.rect.x
y = self.rect.y
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
if self.canFly:
if now - self.fly_timer > self.flyTime:
self.fly_timer = now
self.fly = True
if self.fly == False:
self.hitbottom = False
if self.rect.left <=0:
self.rect.left = 0
self.direction *= -1
if self.rect.right >= WIDTH:
self.rect.right = WIDTH
self.direction *= -1
self.move(self.direction)
if now - self.lastBomb > random.randrange(800, 1000000):
self.lastBomb = now
if self.smartShoot:
if self.rect.x < player.rect.x:
bomba = Bomb(self.rect.centerx, self.rect.bottom, 1)
else:
bomba = Bomb(self.rect.centerx, self.rect.bottom, -1)
else:
bomba = Bomb(self.rect.centerx, self.rect.bottom, random.randrange(4))
allSprites.add(bomba)
bombs.add(bomba)
elif self.fly == True:
if self.hitbottom:
self.move(-1, 5)
else:
self.move(1, 5)
class Boss(pygame.sprite.Sprite):
def __init__(self, bosstype):
pygame.sprite.Sprite.__init__(self)
self.image = bossImg[bosstype-1]
self.rect = self.image.get_rect()
self.rect.centerx = screen.get_rect().centerx
self.rect.y = 5
self.speedy = random.randrange(5*bosstype, 10*bosstype)
self.speedx = random.randrange(5*bosstype, 10*bosstype)
self.directionx = random.choice([-1, 1])
self.directiony = random.choice([-1, 1])
self.lastUpdate = pygame.time.get_ticks()
self.lastDirection = pygame.time.get_ticks()
self.lastBomb = pygame.time.get_ticks()
self.bosstype = bosstype
self.health = 1000 * bosstype
def move(self):
if self.rect.y < 5:
self.rect.y = 5
if self.rect.bottom > HEIGHT - 200:
self.rect.bottom = HEIGHT - 200
if self.rect.x >= 5 and self.rect.y <= HEIGHT - 200:
self.rect.y += self.speedy * self.directiony
if self.rect.x < 5:
self.rect.x = 5
if self.rect.right > WIDTH - 5:
self.rect.right = WIDTH - 5
if self.rect.x >= 5 and self.rect.x <= WIDTH - 5:
self.rect.x += self.speedx * self.directionx
def update(self):
now = pygame.time.get_ticks()
if now - self.lastDirection > random.randrange(1300,10000):
self.lastDirection = now
self.directionx = random.choice([-1, 1])
self.directiony = random.choice([-1, 1])
if now - self.lastUpdate > random.randrange(80, 200):
self.lastUpdate = now
self.move()
if now - self.lastBomb > random.randrange(100, round(100000/self.bosstype)):
self.lastBomb = now
if self.bosstype > 1:
if self.rect.x < player.rect.x:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom, 1)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom, 1)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom, 1)
if self.bosstype == 3:
bomba4 = Bomb(self.rect.centerx - 40, self.rect.bottom, 1)
bomba5 = Bomb(self.rect.centerx + 40, self.rect.bottom, 1)
allSprites.add(bomba4)
bombs.add(bomba4)
allSprites.add(bomba5)
bombs.add(bomba5)
else:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom, -1)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom, -1)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom, -1)
if self.bosstype == 3:
bomba4 = Bomb(self.rect.centerx - 40, self.rect.bottom, -1)
bomba5 = Bomb(self.rect.centerx + 40, self.rect.bottom, -1)
allSprites.add(bomba4)
bombs.add(bomba4)
allSprites.add(bomba5)
bombs.add(bomba5)
else:
bomba1 = Bomb(self.rect.centerx, self.rect.bottom)
bomba2 = Bomb(self.rect.centerx - 20, self.rect.bottom)
bomba3 = Bomb(self.rect.centerx + 20, self.rect.bottom)
allSprites.add(bomba1)
bombs.add(bomba1)
allSprites.add(bomba2)
bombs.add(bomba2)
allSprites.add(bomba3)
bombs.add(bomba3)
class Bomb(pygame.sprite.Sprite):
def __init__(self, x, y, direction = random.choice([-1, 1])):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.transform.scale(bombImg, (10, 20))
self.rect = self.image.get_rect()
self.rect.midtop = (x, y)
self.speedy = random.randrange(2, 6)
self.speedx = random.randrange(3)
self.direction = direction
bomb_sound.play()
def update(self):
self.rect.y += self.speedy
self.rect.x += self.speedx * self.direction
if self.rect.top > HEIGHT or self.rect.left > WIDTH or self.rect.right < 0:
self.kill()
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.transform.scale(bulletImg, (10, 25))
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -7
def update(self):
self.rect.y += self.speedy
if self.rect.bottom < 0:
self.kill()
class PowerUp(pygame.sprite.Sprite):
def __init__(self, center):
pygame.sprite.Sprite.__init__(self)
self.type = random.choice(['health', 'fire'])
if random.random() > 0.9:
self.type = 'life'
self.image = powerupImgs[self.type]
self.rect = self.image.get_rect()
self.rect.center = center
self.speedy = random.randrange(3, 6)
def update(self):
self.rect.y += self.speedy
if self.rect.top > HEIGHT:
self.kill()
class Explosion(pygame.sprite.Sprite):
def __init__(self, center, size):
pygame.sprite.Sprite.__init__(self)
self.size = size
self.image = explosion[self.size][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.frame = 0
self.lastUpdate = pygame.time.get_ticks()
self.frameRate = 50
def update(self):
now = pygame.time.get_ticks()
if now - self.lastUpdate > self.frameRate:
self.lastUpdate = now
self.frame += 1
if self.frame == len(explosion[self.size]):
self.kill()
else:
center = self.rect.center
self.image = explosion[self.size][self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
class Meteor(pygame.sprite.Sprite):
def __init__(self, speedCap, timeCap = 0):
pygame.sprite.Sprite.__init__(self)
self.startImage = random.choice(meteorImg)
self.image = self.startImage.copy()
self.rect = self.image.get_rect()
self.radius = int(self.rect.width / 2)
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-150, -100)
self.speedCap = speedCap
self.speedx = random.randrange(3)
self.speedy = random.randrange(self.speedCap)
self.direction = random.choice([-1, 1])
self.timeCap = timeCap
self.timeStart = pygame.time.get_ticks()
self.rotationAngle = 0
self.rotationSpeed = random.randrange(-9, 9)
self.lastRotation = pygame.time.get_ticks()
def update(self):
if self.timeCap > 0:
if pygame.time.get_ticks() - self.timeStart > self.timeCap:
if self.rect.y < 0:
self.kill()
now = pygame.time.get_ticks()
if now - self.lastRotation > 50:
self.lastRotation = now
self.rotationAngle = (self.rotationAngle + self.rotationSpeed) % 360
oldCenter = self.rect.center
self.image = pygame.transform.rotate(self.startImage, self.rotationAngle)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
self.rect.x += self.speedx * self.direction
self.rect.y += self.speedy
if self.rect.y > HEIGHT or self.rect.right < 0 or self.rect.width > WIDTH:
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-150, -100)
self.speedx = random.randrange(3)
self.speedy = random.randrange(self.speedCap)
class Star(pygame.sprite.Sprite):
def __init__(self, x):
pygame.sprite.Sprite.__init__(self)
self.startImage = pygame.transform.scale(random.choice(starImg), (random.randrange(10,20),random.randrange(10,20)))
self.image = self.startImage.copy()
self.rect = self.image.get_rect()
self.rect.x = x
self.startx = x
self.rect.y = -30
self.speedx = random.randrange(2, 5)
self.speedy = random.randrange(2, 6)
self.direction = random.choice([-1, 1])
self.timeStart = pygame.time.get_ticks()
self.rotationAngle = 0
self.rotationSpeed = random.randrange(-7, 7)
self.lastRotation = pygame.time.get_ticks()
def update(self):
self.rect.x += self.speedx * self.direction
self.rect.y += self.speedy
if self.rect.y > HEIGHT+25 or self.rect.x < 0-15 or self.rect.x > WIDTH+15:
self.rect.y = -25
self.rect.x = self.startx
now = pygame.time.get_ticks()
if now - self.lastRotation > 50:
self.lastRotation = now
self.rotationAngle = (self.rotationAngle + self.rotationSpeed) % 360
oldCenter = self.rect.center
self.image = pygame.transform.rotate(self.startImage, self.rotationAngle)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
def destroy(self):
if self.rect.y > HEIGHT or self.rect.y < 0 or self.rect.x < 0 or self.rect.x > WIDTH:
self.kill()
class Button(pygame.sprite.Sprite):
def __init__(self, x, y, type):
pygame.sprite.Sprite.__init__(self)
self.type = type
self.image = buttonImg
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.clicked = pygame.mouse.get_pressed()
def update(self):
mouse = pygame.mouse.get_pos()
self.clicked = pygame.mouse.get_pressed()
if mouse[0] >= self.rect.x and mouse[0] <= self.rect.right and mouse[1] >= self.rect.y and mouse[1] <= self.rect.bottom:
self.image = buttonLitImg
if self.clicked[0] == 1:
self.action()
else:
self.image = buttonImg
printText(screen, self.type, 42, self.rect.x + 22, self.rect.y + 55, LIGHT_GREEN, self.rect.center)
def action(self):
if self.type == 'PLAY':
runGame()
elif self.type == 'EXIT':
pygame.quit()
playerImg = loadImage('avion.png')
playerImg_immune = loadImage('avion_immune.png')
playerLifeImg = pygame.transform.scale(loadImage('life.png'), (25, 20))
bulletImg = loadImage('raketa.png')
bombImg = loadImage('bomba.png')
allienImg = [loadImage('vanzemaljaca0.png'), loadImage('vanzemaljaca1.png'), loadImage('vanzemaljacb0.png'),
loadImage('vanzemaljacb1.png'), loadImage('vanzemaljacc0.png'), loadImage('vanzemaljacc1.png'), ]
bossImg = [pygame.transform.scale(loadImage('boss1.png'), (200, 200)),
pygame.transform.scale(loadImage('boss2.png'), (200, 200)),
pygame.transform.scale(loadImage('boss3.png'), (200, 200))]
meteorImg = [pygame.transform.scale(loadImage('meteor1.png'), (100, 100)),
pygame.transform.scale(loadImage('meteor2.png'), (70, 70)),
pygame.transform.scale(loadImage('meteor3.png'), (50, 50)),
pygame.transform.scale(loadImage('meteor4.png'), (30, 30)),
pygame.transform.scale(loadImage('meteor5.png'), (20, 20))]
starImg = [loadImage('star1.png'), loadImage('star2.png'), loadImage('star3.png'), loadImage('star4.png'), loadImage('star5.png')]
buttonImg = pygame.transform.scale(loadImage('button.png'), (170, 70))
buttonLitImg = pygame.transform.scale(loadImage('buttonLit.png'), (170, 70))
backgroundImg = pygame.transform.scale(loadImage('starfield.png'), (WIDTH, HEIGHT))
backgroundRect = backgroundImg.get_rect()
startImg = pygame.transform.scale(loadImage('startscreen.png'), (WIDTH, HEIGHT))
startImgRect = startImg.get_rect()
storyImg = pygame.transform.scale(loadImage('storyImg.png'), (WIDTH, HEIGHT))
storyImgRect = storyImg.get_rect()
pauseScreen = pygame.Surface((WIDTH, HEIGHT)).convert_alpha()
pauseScreen.fill((0, 0, 0, 190))
explosion = {}
explosion['large'] = []
explosion['small'] = []
powerupImgs = {}
powerupImgs['health'] = pygame.transform.scale(loadImage('health.png'), (30, 30))
powerupImgs['fire'] = pygame.transform.scale(loadImage('fire.png'), (30, 30))
powerupImgs['life'] = pygame.transform.scale(loadImage('life.png'), (30, 30))
for i in range(10):
file = 'explosion{}.png'.format(i)
img = loadImage(file)
imgLarge = pygame.transform.scale(img, (70, 70))
explosion['large'].append(imgLarge)
imgSmall = pygame.transform.scale(img, (30, 30))
explosion['small'].append(imgSmall)
background_music = loadSound('RoundtableRival.ogg')
pygame.mixer.music = background_music
pygame.mixer.music.set_volume(0.2)
boss_fight_music = loadSound('DBZ_BOSS_FIGHT.ogg')
story_music = loadSound('STAR_WARS.ogg')
shoot_sound = loadSound('shoot.wav')
pygame.mixer.Sound.set_volume(shoot_sound, 0.4)
bomb_sound = loadSound('bomb.wav')
pygame.mixer.Sound.set_volume(bomb_sound, 0.3)
powerup_sound = loadSound('powerup.wav')
pygame.mixer.Sound.set_volume(powerup_sound, 0.6)
playerExplosion_sound = loadSound('playerExplosion.wav')
meteorExplosion_sound = loadSound('meteorExplosion.wav')
pygame.mixer.Sound.set_volume(meteorExplosion_sound, 0.6)
allienExplosion_sound = loadSound('allienExplosion.wav')
pygame.mixer.Sound.set_volume(allienExplosion_sound, 0.5)
keyPress_sound = loadSound('keypress.wav')
pygame.mixer.Sound.set_volume(keyPress_sound, 0.5)
# LOADING HIGH SCORE
try:
with open(os.path.join(textfile_dir, 'highscore.txt'), 'r') as f: # automatic file close after loop
try:
highscore = int(f.read())
except:
highscore = 0
except:
with open(os.path.join(textfile_dir, 'highscore.txt'), 'w') as f: # automatic file close after loop
highscore = 0
allSprites = pygame.sprite.Group()
alliens = pygame.sprite.Group()
meteors = pygame.sprite.Group()
bullets = pygame.sprite.Group()
bombs = pygame.sprite.Group()
bosses = pygame.sprite.Group()
stars = pygame.sprite.Group()
powerups = pygame.sprite.Group()
buttons = pygame.sprite.Group()
player = Player()
allSprites.add(player)
paused = False
level = 1
def initializeGame():
global paused
alliens.empty()
meteors.empty()
bullets.empty()
bombs.empty()
powerups.empty()
bosses.empty()
stars.empty()
player.reset()
allSprites.empty()
allSprites.add(player)
paused = False
def showGameStartScreen():
pygame.mixer.music.play(-1)
buttons.empty()
btn = Button(280, 300, 'PLAY')
buttons.add(btn)
btn = Button(600, 550, 'EXIT')
buttons.add(btn)
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
screen.blit(startImg, startImgRect)
buttons.draw(screen)
printText(screen, 'HIGH SCORE:' + str(highscore), 30, WIDTH/2 - 165, HEIGHT-30, LIGHT_GREEN)
buttons.update() # PRINTING TEXT ON BUTTONS
pygame.display.update()
def showTransitionScreen(text):
global paused, level
running = True
timer = pygame.time.get_ticks()
#add stars
for i in range(NUMSTARS):
x = random.randrange(WIDTH)
z = Star(x)
stars.add(z)
stars.update()
while stars:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_SPACE and not paused and (pygame.time.get_ticks() - player.shoot_timer > RELOAD):
player.shoot()
if event.key == K_p:
paused = not paused
hits = pygame.sprite.spritecollide(player, powerups, True)
for hit in hits:
powerup_sound.play()
if hit.type == 'health':
player.health += 20
if player.health > PLAYER_MAX_HEALTH:
player.health = PLAYER_MAX_HEALTH
elif hit.type == 'life':
player.lives += 1
if player.lives > 3:
player.lives = 3
else:
player.powerup()
if not paused:
stars.update()
allSprites.update()
# DRAW
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
stars.draw(screen)
printText(screen, 'Level: ' + str(level), 25, 9, HEIGHT - 29, LIGHT_GREEN)
printText(screen, 'SCORE:' + str(player.score), 25, WIDTH - 185, HEIGHT - 3, LIGHT_GREEN)
allSprites.draw(screen)
now = pygame.time.get_ticks()
if now - timer > 3000 and now - timer < 6000:
if (pygame.time.get_ticks() - timer) % 120 <= 100:
printText(screen, text, 70, 0, 0, LIGHT_GREEN, (WIDTH/2, 100))
drawHealthBar(screen, 10, HEIGHT - 30, player.health, GREEN, PLAYER_MAX_HEALTH, 200)
drawLives(screen, 15, HEIGHT - 29, player.lives, playerLifeImg)
if paused:
printText(screen, text, 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, 100))
screen.blit(pauseScreen, (0, 0))
printText(screen, 'PAUSE', 100, 0, 0, LIGHT_GREEN, screen.get_rect().center)
pygame.display.update()
if now - timer > 5000 and not paused:
for z in stars:
Star.destroy(z)
def startLevel(allienRows, smartShoot, suicide):
for k in range(allienRows):
for i in range(11):
tmp = random.choice([0, 2, 4])
a = Alien(70 * i, k * 70, allienImg[tmp], allienImg[tmp + 1], smartShoot, suicide)
allSprites.add(a)
alliens.add(a)
def startMeteorRain(k, speedCap, time):
for i in range(k):
m = Meteor(speedCap, time)
meteors.add(m)
allSprites.add(m)
def spawnBoss(x):
boss = Boss(x)
bosses.add(boss)
allSprites.add(boss)
runLvl()
boss_fight_music.stop()
pygame.mixer.music.play(-1)
def checkCollision():
hits = pygame.sprite.spritecollide(player, powerups, True)
for hit in hits:
powerup_sound.play()
if hit.type == 'health':
player.health += 20
if player.health > PLAYER_MAX_HEALTH:
player.health = PLAYER_MAX_HEALTH
elif hit.type == 'life':
player.lives += 1
if player.lives > 3:
player.lives = 3
else:
player.powerup()
hits = pygame.sprite.groupcollide(alliens, bullets, True, True)
for hit in hits:
player.score += 7 * hit.speedx
allienExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allSprites.add(expl)
if random.random() > 0.8:
pow = PowerUp(hit.rect.center)
powerups.add(pow)
allSprites.add(pow)
hits = pygame.sprite.groupcollide(bullets, bosses, True, False)
for hit in hits:
allienExplosion_sound.play()
expl = Explosion(hit.rect.midtop, 'large')
allSprites.add(expl)
for boss in bosses:
player.score += 5 * (boss.speedx + 1)
boss.health -= 99
if boss.health <= 0:
bosses.remove()
hits = pygame.sprite.spritecollide(player, bombs, True)
for hit in hits:
if not player.immune:
player.health -= 13 * hit.speedy
if player.health <= 0:
expl = Explosion(player.rect.center, 'large')
player.lives -= 1
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
else:
expl = Explosion(hit.rect.center, 'small')
allSprites.add(expl)
playerExplosion_sound.play()
hits = pygame.sprite.groupcollide(meteors, bullets, True, True)
for hit in hits:
player.score += 60 - hit.radius
meteorExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allSprites.add(expl)
hits = pygame.sprite.spritecollide(player, meteors, True, pygame.sprite.collide_circle)
for hit in hits:
if not player.immune:
player.health -= 2 * hit.radius
if player.health <= 0:
expl = Explosion(hit.rect.center, 'large')
player.lives -= 1
player.hide()
allSprites.add(expl)
expl = Explosion(player.rect.center, 'large')
allSprites.add(expl)
playerExplosion_sound.play()
meteorExplosion_sound.play()
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
else:
expl = Explosion(hit.rect.center, 'small')
allSprites.add(expl)
playerExplosion_sound.play()
hits = pygame.sprite.spritecollide(player, alliens, True)
for hit in hits:
if not player.immune:
player.lives -= 1
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
expl = Explosion(player.rect.center, 'large')
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
expl = Explosion(hit.rect.center, 'large')
allienExplosion_sound.play()
allSprites.add(expl)
hits = pygame.sprite.spritecollide(player, bosses, False)
for hit in hits:
if not player.immune:
player.lives -= 1
if player.lives > 0:
player.health = PLAYER_MAX_HEALTH
expl = Explosion(player.rect.center, 'large')
player.hide()
allSprites.add(expl)
playerExplosion_sound.play()
def showGameOverScreen():
global highscore
buttons.empty()
btn = Button(280, 550, 'PLAY')
buttons.add(btn)
btn = Button(600, 550, 'EXIT')
buttons.add(btn)
if player.score > highscore:
highscore = player.score
with open(os.path.join(textfile_dir, 'highscore.txt'), 'w') as f: # automatic file close after loop
f.write(str(highscore))
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
if player.lives > 0:
printText(screen, 'VICTORY', 100, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2-120))
else:
printText(screen, 'DEFEAT', 100, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2-120))
if player.score == highscore:
printText(screen, 'NEW HIGH SCORE!', 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, HEIGHT / 2))
printText(screen, str(highscore), 70, 0, 0, LIGHT_GREEN, (WIDTH / 2, HEIGHT / 2 + 90))
else:
printText(screen, 'SCORE: ' + str(player.score), 65, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2))
printText(screen, 'HIGH SCORE: ' + str(highscore), 65, 0, 0, LIGHT_GREEN, (WIDTH/2, HEIGHT/2 + 90))
buttons.draw(screen)
buttons.update() # PRINTING TEXT ON BUTTONS
pygame.display.update()
def runLvl():
global paused, player
while alliens or meteors or bosses:
clock.tick(FPS)
# PROCESS
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == K_SPACE and not paused and (pygame.time.get_ticks() - player.shoot_timer > RELOAD):
player.shoot()
if event.key == K_p:
paused = not paused
checkCollision()
# UPDATE
if not paused:
allSprites.update()
# DRAW
screen.fill(BLACK)
screen.blit(backgroundImg, backgroundRect)
printText(screen, 'Level: ' + str(level), 25, 9, HEIGHT - 29, LIGHT_GREEN)
printText(screen, 'SCORE:' + str(player.score), 25, WIDTH - 185, HEIGHT - 3, LIGHT_GREEN)
allSprites.draw(screen)
for boss in bosses:
drawHealthBar(screen, 240, HEIGHT - 30, boss.health, RED, 1000*boss.bosstype, 350)
if boss.health <= 0:
player.score += 300*boss.bosstype
bosses.remove(boss)
allSprites.remove(boss)
drawHealthBar(screen, 10, HEIGHT - 30, player.health, GREEN, PLAYER_MAX_HEALTH, 200)
drawLives(screen, 15, HEIGHT - 29, player.lives, playerLifeImg)
if paused:
screen.blit(pauseScreen, (0, 0))
printText(screen, 'PAUSE', 100, 0, 0, LIGHT_GREEN, screen.get_rect().center)
pygame.display.update()
def runGame():
initializeGame()
global level
showTransitionScreen('ARMY ATTACKS')
startLevel(3, False, False)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(30, 6, 2500)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('CAPTAIN ATTACKS')
spawnBoss(1)
level += 1
showTransitionScreen('ARMY ATTACKS')
startLevel(4, True, False)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(45, 8, 5000)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('GENERAL ATTACKS')
spawnBoss(2)
level += 1
showTransitionScreen('ARMY ATTACKS')
startLevel(5, True, True)
runLvl()
showTransitionScreen('METEOR RAIN')
startMeteorRain(50, 8, 5500)
runLvl()
pygame.mixer.music.stop()
boss_fight_music.play(-1)
showTransitionScreen('LEADER ATTACKS')
spawnBoss(3)
if (not alliens) and (not bosses):
showTransitionScreen('ALIENS DEFEATED')
showGameOverScreen()
# MAIN
showStory()
pygame.quit()
|
[
"pygame.event.get",
"pygame.mixer.init",
"pygame.Rect",
"pygame.display.update",
"pygame.sprite.spritecollide",
"pygame.font.Font",
"pygame.mouse.get_pos",
"pygame.display.set_mode",
"pygame.mixer.music.play",
"pygame.transform.scale",
"pygame.display.set_caption",
"pygame.mixer.Sound",
"pygame.quit",
"pygame.mouse.get_pressed",
"pygame.Surface",
"pygame.draw.rect",
"pygame.sprite.groupcollide",
"pygame.mixer.pre_init",
"pygame.init",
"random.random",
"pygame.image.load",
"pygame.mixer.Sound.set_volume",
"pygame.time.get_ticks",
"pygame.time.Clock",
"pygame.mixer.music.stop",
"pygame.transform.rotate",
"pygame.display.set_icon",
"random.choice",
"pygame.sprite.Group",
"pygame.mixer.music.set_volume",
"random.randrange",
"pygame.sprite.Sprite.__init__",
"pygame.key.get_pressed"
] |
[((386, 427), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', (['(44100)', '(-16)', '(1)', '(512)'], {}), '(44100, -16, 1, 512)\n', (407, 427), False, 'import pygame\n'), ((490, 509), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (507, 509), False, 'import pygame\n'), ((528, 541), 'pygame.init', 'pygame.init', ([], {}), '()\n', (539, 541), False, 'import pygame\n'), ((552, 592), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (575, 592), False, 'import pygame\n'), ((594, 639), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Save The Galaxy"""'], {}), "('Save The Galaxy')\n", (620, 639), False, 'import pygame\n'), ((649, 668), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (666, 668), False, 'import pygame\n'), ((1104, 1136), 'pygame.display.set_icon', 'pygame.display.set_icon', (['iconImg'], {}), '(iconImg)\n', (1127, 1136), False, 'import pygame\n'), ((1335, 1358), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1356, 1358), False, 'import pygame\n'), ((24339, 24373), 'pygame.mixer.music.set_volume', 'pygame.mixer.music.set_volume', (['(0.2)'], {}), '(0.2)\n', (24368, 24373), False, 'import pygame\n'), ((24507, 24554), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['shoot_sound', '(0.4)'], {}), '(shoot_sound, 0.4)\n', (24536, 24554), False, 'import pygame\n'), ((24592, 24638), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['bomb_sound', '(0.3)'], {}), '(bomb_sound, 0.3)\n', (24621, 24638), False, 'import pygame\n'), ((24682, 24731), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['powerup_sound', '(0.6)'], {}), '(powerup_sound, 0.6)\n', (24711, 24731), False, 'import pygame\n'), ((24849, 24906), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['meteorExplosion_sound', '(0.6)'], {}), '(meteorExplosion_sound, 0.6)\n', (24878, 24906), False, 'import pygame\n'), ((24966, 25023), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['allienExplosion_sound', '(0.5)'], {}), '(allienExplosion_sound, 0.5)\n', (24995, 25023), False, 'import pygame\n'), ((25069, 25119), 'pygame.mixer.Sound.set_volume', 'pygame.mixer.Sound.set_volume', (['keyPress_sound', '(0.5)'], {}), '(keyPress_sound, 0.5)\n', (25098, 25119), False, 'import pygame\n'), ((25511, 25532), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25530, 25532), False, 'import pygame\n'), ((25544, 25565), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25563, 25565), False, 'import pygame\n'), ((25577, 25598), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25596, 25598), False, 'import pygame\n'), ((25610, 25631), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25629, 25631), False, 'import pygame\n'), ((25641, 25662), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25660, 25662), False, 'import pygame\n'), ((25673, 25694), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25692, 25694), False, 'import pygame\n'), ((25704, 25725), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25723, 25725), False, 'import pygame\n'), ((25738, 25759), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25757, 25759), False, 'import pygame\n'), ((25771, 25792), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (25790, 25792), False, 'import pygame\n'), ((38018, 38031), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (38029, 38031), False, 'import pygame\n'), ((976, 999), 'pygame.image.load', 'pygame.image.load', (['file'], {}), '(file)\n', (993, 999), False, 'import pygame\n'), ((1439, 1463), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['file'], {}), '(file)\n', (1457, 1463), False, 'import pygame\n'), ((1560, 1588), 'pygame.font.Font', 'pygame.font.Font', (['FONT', 'size'], {}), '(FONT, size)\n', (1576, 1588), False, 'import pygame\n'), ((1977, 2000), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (1998, 2000), False, 'import pygame\n'), ((2690, 2713), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2711, 2713), False, 'import pygame\n'), ((3890, 3929), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'barLength', 'barHeight'], {}), '(x, y, barLength, barHeight)\n', (3901, 3929), False, 'import pygame\n'), ((3946, 3980), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'fill', 'barHeight'], {}), '(x, y, fill, barHeight)\n', (3957, 3980), False, 'import pygame\n'), ((3986, 4034), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', 'healthColor', 'fillRect'], {}), '(surface, healthColor, fillRect)\n', (4002, 4034), False, 'import pygame\n'), ((4040, 4088), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', 'WHITE', 'outlineRect', '(2)'], {}), '(surface, WHITE, outlineRect, 2)\n', (4056, 4088), False, 'import pygame\n'), ((24068, 24105), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(70, 70)'], {}), '(img, (70, 70))\n', (24090, 24105), False, 'import pygame\n'), ((24163, 24200), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(30, 30)'], {}), '(img, (30, 30))\n', (24185, 24200), False, 'import pygame\n'), ((26181, 26208), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (26204, 26208), False, 'import pygame\n'), ((26872, 26895), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (26893, 26895), False, 'import pygame\n'), ((29837, 29864), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (29860, 29864), False, 'import pygame\n'), ((29904, 29955), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'powerups', '(True)'], {}), '(player, powerups, True)\n', (29931, 29955), False, 'import pygame\n'), ((30369, 30425), 'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['alliens', 'bullets', '(True)', '(True)'], {}), '(alliens, bullets, True, True)\n', (30395, 30425), False, 'import pygame\n'), ((30765, 30821), 'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['bullets', 'bosses', '(True)', '(False)'], {}), '(bullets, bosses, True, False)\n', (30791, 30821), False, 'import pygame\n'), ((31158, 31206), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'bombs', '(True)'], {}), '(player, bombs, True)\n', (31185, 31206), False, 'import pygame\n'), ((31809, 31865), 'pygame.sprite.groupcollide', 'pygame.sprite.groupcollide', (['meteors', 'bullets', '(True)', '(True)'], {}), '(meteors, bullets, True, True)\n', (31835, 31865), False, 'import pygame\n'), ((32063, 32148), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'meteors', '(True)', 'pygame.sprite.collide_circle'], {}), '(player, meteors, True, pygame.sprite.collide_circle\n )\n', (32090, 32148), False, 'import pygame\n'), ((32891, 32941), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'alliens', '(True)'], {}), '(player, alliens, True)\n', (32918, 32941), False, 'import pygame\n'), ((33411, 33461), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'bosses', '(False)'], {}), '(player, bosses, False)\n', (33438, 33461), False, 'import pygame\n'), ((37107, 37132), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (37130, 37132), False, 'import pygame\n'), ((37431, 37456), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (37454, 37456), False, 'import pygame\n'), ((37754, 37779), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (37777, 37779), False, 'import pygame\n'), ((2072, 2090), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2088, 2090), False, 'import pygame\n'), ((3467, 3485), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3483, 3485), False, 'import pygame\n'), ((4362, 4397), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (4391, 4397), False, 'import pygame\n'), ((4749, 4772), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (4770, 4772), False, 'import pygame\n'), ((4831, 4854), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (4852, 4854), False, 'import pygame\n'), ((4910, 4933), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (4931, 4933), False, 'import pygame\n'), ((4962, 4985), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (4983, 4985), False, 'import pygame\n'), ((5962, 5986), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (5984, 5986), False, 'import pygame\n'), ((7724, 7747), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (7745, 7747), False, 'import pygame\n'), ((7871, 7894), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (7892, 7894), False, 'import pygame\n'), ((8252, 8287), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (8281, 8287), False, 'import pygame\n'), ((8539, 8561), 'random.randrange', 'random.randrange', (['(1)', '(3)'], {}), '(1, 3)\n', (8555, 8561), False, 'import random\n'), ((8617, 8640), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (8638, 8640), False, 'import pygame\n'), ((8666, 8689), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (8687, 8689), False, 'import pygame\n'), ((8807, 8830), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (8828, 8830), False, 'import pygame\n'), ((8922, 8951), 'random.randrange', 'random.randrange', (['(5000)', '(30000)'], {}), '(5000, 30000)\n', (8938, 8951), False, 'import random\n'), ((9510, 9559), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['self', 'alliens', '(False)'], {}), '(self, alliens, False)\n', (9537, 9559), False, 'import pygame\n'), ((9679, 9702), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (9700, 9702), False, 'import pygame\n'), ((11375, 11410), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (11404, 11410), False, 'import pygame\n'), ((11599, 11644), 'random.randrange', 'random.randrange', (['(5 * bosstype)', '(10 * bosstype)'], {}), '(5 * bosstype, 10 * bosstype)\n', (11615, 11644), False, 'import random\n'), ((11664, 11709), 'random.randrange', 'random.randrange', (['(5 * bosstype)', '(10 * bosstype)'], {}), '(5 * bosstype, 10 * bosstype)\n', (11680, 11709), False, 'import random\n'), ((11733, 11755), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (11746, 11755), False, 'import random\n'), ((11783, 11805), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (11796, 11805), False, 'import random\n'), ((11833, 11856), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (11854, 11856), False, 'import pygame\n'), ((11887, 11910), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (11908, 11910), False, 'import pygame\n'), ((11936, 11959), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (11957, 11959), False, 'import pygame\n'), ((12625, 12648), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (12646, 12648), False, 'import pygame\n'), ((14995, 15017), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (15008, 15017), False, 'import random\n'), ((15029, 15064), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (15058, 15064), False, 'import pygame\n'), ((15087, 15128), 'pygame.transform.scale', 'pygame.transform.scale', (['bombImg', '(10, 20)'], {}), '(bombImg, (10, 20))\n', (15109, 15128), False, 'import pygame\n'), ((15230, 15252), 'random.randrange', 'random.randrange', (['(2)', '(6)'], {}), '(2, 6)\n', (15246, 15252), False, 'import random\n'), ((15276, 15295), 'random.randrange', 'random.randrange', (['(3)'], {}), '(3)\n', (15292, 15295), False, 'import random\n'), ((15666, 15701), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (15695, 15701), False, 'import pygame\n'), ((15724, 15767), 'pygame.transform.scale', 'pygame.transform.scale', (['bulletImg', '(10, 25)'], {}), '(bulletImg, (10, 25))\n', (15746, 15767), False, 'import pygame\n'), ((16104, 16139), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (16133, 16139), False, 'import pygame\n'), ((16161, 16194), 'random.choice', 'random.choice', (["['health', 'fire']"], {}), "(['health', 'fire'])\n", (16174, 16194), False, 'import random\n'), ((16408, 16430), 'random.randrange', 'random.randrange', (['(3)', '(6)'], {}), '(3, 6)\n', (16424, 16430), False, 'import random\n'), ((16647, 16682), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (16676, 16682), False, 'import pygame\n'), ((16884, 16907), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (16905, 16907), False, 'import pygame\n'), ((16977, 17000), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (16998, 17000), False, 'import pygame\n'), ((17520, 17555), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (17549, 17555), False, 'import pygame\n'), ((17583, 17607), 'random.choice', 'random.choice', (['meteorImg'], {}), '(meteorImg)\n', (17596, 17607), False, 'import random\n'), ((17767, 17808), 'random.randrange', 'random.randrange', (['(WIDTH - self.rect.width)'], {}), '(WIDTH - self.rect.width)\n', (17783, 17808), False, 'import random\n'), ((17832, 17860), 'random.randrange', 'random.randrange', (['(-150)', '(-100)'], {}), '(-150, -100)\n', (17848, 17860), False, 'import random\n'), ((17918, 17937), 'random.randrange', 'random.randrange', (['(3)'], {}), '(3)\n', (17934, 17937), False, 'import random\n'), ((17961, 17992), 'random.randrange', 'random.randrange', (['self.speedCap'], {}), '(self.speedCap)\n', (17977, 17992), False, 'import random\n'), ((18019, 18041), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (18032, 18041), False, 'import random\n'), ((18100, 18123), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (18121, 18123), False, 'import pygame\n'), ((18186, 18209), 'random.randrange', 'random.randrange', (['(-9)', '(9)'], {}), '(-9, 9)\n', (18202, 18209), False, 'import random\n'), ((18239, 18262), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (18260, 18262), False, 'import pygame\n'), ((18480, 18503), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (18501, 18503), False, 'import pygame\n'), ((19367, 19402), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (19396, 19402), False, 'import pygame\n'), ((19716, 19738), 'random.randrange', 'random.randrange', (['(2)', '(5)'], {}), '(2, 5)\n', (19732, 19738), False, 'import random\n'), ((19762, 19784), 'random.randrange', 'random.randrange', (['(2)', '(6)'], {}), '(2, 6)\n', (19778, 19784), False, 'import random\n'), ((19811, 19833), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (19824, 19833), False, 'import random\n'), ((19860, 19883), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (19881, 19883), False, 'import pygame\n'), ((19946, 19969), 'random.randrange', 'random.randrange', (['(-7)', '(7)'], {}), '(-7, 7)\n', (19962, 19969), False, 'import random\n'), ((19999, 20022), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (20020, 20022), False, 'import pygame\n'), ((20313, 20336), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (20334, 20336), False, 'import pygame\n'), ((20950, 20985), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (20979, 20985), False, 'import pygame\n'), ((21161, 21187), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (21185, 21187), False, 'import pygame\n'), ((21230, 21252), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (21250, 21252), False, 'import pygame\n'), ((21277, 21303), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (21301, 21303), False, 'import pygame\n'), ((23551, 23582), 'pygame.Surface', 'pygame.Surface', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (23565, 23582), False, 'import pygame\n'), ((26412, 26430), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (26428, 26430), False, 'import pygame\n'), ((26752, 26775), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (26773, 26775), False, 'import pygame\n'), ((26958, 26981), 'random.randrange', 'random.randrange', (['WIDTH'], {}), '(WIDTH)\n', (26974, 26981), False, 'import random\n'), ((27118, 27136), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (27134, 27136), False, 'import pygame\n'), ((27510, 27561), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['player', 'powerups', '(True)'], {}), '(player, powerups, True)\n', (27537, 27561), False, 'import pygame\n'), ((28453, 28476), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (28474, 28476), False, 'import pygame\n'), ((29092, 29115), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (29113, 29115), False, 'import pygame\n'), ((34267, 34285), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (34283, 34285), False, 'import pygame\n'), ((35239, 35262), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (35260, 35262), False, 'import pygame\n'), ((35418, 35436), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (35434, 35436), False, 'import pygame\n'), ((36833, 36856), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (36854, 36856), False, 'import pygame\n'), ((2507, 2530), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2528, 2530), False, 'import pygame\n'), ((2598, 2621), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2619, 2621), False, 'import pygame\n'), ((5197, 5222), 'pygame.mixer.music.stop', 'pygame.mixer.music.stop', ([], {}), '()\n', (5220, 5222), False, 'import pygame\n'), ((5273, 5300), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (5296, 5300), False, 'import pygame\n'), ((5801, 5824), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5822, 5824), False, 'import pygame\n'), ((6710, 6733), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (6731, 6733), False, 'import pygame\n'), ((12688, 12717), 'random.randrange', 'random.randrange', (['(1300)', '(10000)'], {}), '(1300, 10000)\n', (12704, 12717), False, 'import random\n'), ((12787, 12809), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (12800, 12809), False, 'import random\n'), ((12841, 12863), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (12854, 12863), False, 'import random\n'), ((12900, 12925), 'random.randrange', 'random.randrange', (['(80)', '(200)'], {}), '(80, 200)\n', (12916, 12925), False, 'import random\n'), ((16207, 16222), 'random.random', 'random.random', ([], {}), '()\n', (16220, 16222), False, 'import random\n'), ((18733, 18793), 'pygame.transform.rotate', 'pygame.transform.rotate', (['self.startImage', 'self.rotationAngle'], {}), '(self.startImage, self.rotationAngle)\n', (18756, 18793), False, 'import pygame\n'), ((19087, 19128), 'random.randrange', 'random.randrange', (['(WIDTH - self.rect.width)'], {}), '(WIDTH - self.rect.width)\n', (19103, 19128), False, 'import random\n'), ((19156, 19184), 'random.randrange', 'random.randrange', (['(-150)', '(-100)'], {}), '(-150, -100)\n', (19172, 19184), False, 'import random\n'), ((19212, 19231), 'random.randrange', 'random.randrange', (['(3)'], {}), '(3)\n', (19228, 19231), False, 'import random\n'), ((19259, 19290), 'random.randrange', 'random.randrange', (['self.speedCap'], {}), '(self.speedCap)\n', (19275, 19290), False, 'import random\n'), ((19453, 19475), 'random.choice', 'random.choice', (['starImg'], {}), '(starImg)\n', (19466, 19475), False, 'import random\n'), ((20566, 20626), 'pygame.transform.rotate', 'pygame.transform.rotate', (['self.startImage', 'self.rotationAngle'], {}), '(self.startImage, self.rotationAngle)\n', (20589, 20626), False, 'import pygame\n'), ((29362, 29386), 'random.choice', 'random.choice', (['[0, 2, 4]'], {}), '([0, 2, 4])\n', (29375, 29386), False, 'import random\n'), ((30620, 30635), 'random.random', 'random.random', ([], {}), '()\n', (30633, 30635), False, 'import random\n'), ((2152, 2165), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2163, 2165), False, 'import pygame\n'), ((2332, 2355), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2353, 2355), False, 'import pygame\n'), ((2373, 2388), 'random.random', 'random.random', ([], {}), '()\n', (2386, 2388), False, 'import random\n'), ((3547, 3560), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3558, 3560), False, 'import pygame\n'), ((5512, 5535), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5533, 5535), False, 'import pygame\n'), ((10609, 10639), 'random.randrange', 'random.randrange', (['(800)', '(1000000)'], {}), '(800, 1000000)\n', (10625, 10639), False, 'import random\n'), ((19478, 19502), 'random.randrange', 'random.randrange', (['(10)', '(20)'], {}), '(10, 20)\n', (19494, 19502), False, 'import random\n'), ((19502, 19526), 'random.randrange', 'random.randrange', (['(10)', '(20)'], {}), '(10, 20)\n', (19518, 19526), False, 'import random\n'), ((21833, 21846), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (21844, 21846), False, 'import pygame\n'), ((26492, 26505), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (26503, 26505), False, 'import pygame\n'), ((27198, 27211), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (27209, 27211), False, 'import pygame\n'), ((34347, 34360), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (34358, 34360), False, 'import pygame\n'), ((35498, 35511), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (35509, 35511), False, 'import pygame\n'), ((5382, 5405), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5403, 5405), False, 'import pygame\n'), ((5566, 5589), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5587, 5589), False, 'import pygame\n'), ((5855, 5878), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5876, 5878), False, 'import pygame\n'), ((18336, 18359), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (18357, 18359), False, 'import pygame\n'), ((11046, 11065), 'random.randrange', 'random.randrange', (['(4)'], {}), '(4)\n', (11062, 11065), False, 'import random\n'), ((28549, 28572), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (28570, 28572), False, 'import pygame\n'), ((27321, 27344), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (27342, 27344), False, 'import pygame\n'), ((35621, 35644), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (35642, 35644), False, 'import pygame\n')]
|
# coding=utf-8
""" General tools for the Jupyter Notebook and Lab """
from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, \
VBox, Button, Box, ToggleButton, IntSlider, FloatText
from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float
from .. import batch
# imports for async widgets
from multiprocessing import Pool
import time
# import EE
import ee
if not ee.data._initialized: ee.Initialize()
def create_accordion(dictionary):
""" Create an Accordion output from a dict object """
widlist = []
ini = 0
widget = Accordion()
widget.selected_index = None # this will unselect all
for key, val in dictionary.items():
if isinstance(val, dict):
newwidget = create_accordion(val)
widlist.append(newwidget)
elif isinstance(val, list):
# tranform list to a dictionary
dictval = {k: v for k, v in enumerate(val)}
newwidget = create_accordion(dictval)
widlist.append(newwidget)
else:
value = HTML(str(val))
widlist.append(value)
widget.set_title(ini, key)
ini += 1
widget.children = widlist
return widget
def create_object_output(object):
''' Create a output Widget for Images, Geometries and Features '''
ty = object.__class__.__name__
if ty == 'Image':
info = object.getInfo()
image_id = info['id'] if 'id' in info else 'No Image ID'
prop = info['properties']
bands = info['bands']
bands_names = [band['id'] for band in bands]
bands_types = [band['data_type']['precision'] for band in bands]
bands_crs = [band['crs'] for band in bands]
new_band_names = ['<li>{} - {} - {}</li>'.format(name, ty, epsg) for name, ty, epsg in zip(bands_names, bands_types, bands_crs)]
new_properties = ['<li><b>{}</b>: {}</li>'.format(key, val) for key, val in prop.items()]
header = HTML('<b>Image id:</b> {id} </br>'.format(id=image_id))
bands_wid = HTML('<ul>'+''.join(new_band_names)+'</ul>')
prop_wid = HTML('<ul>'+''.join(new_properties)+'</ul>')
acc = Accordion([bands_wid, prop_wid])
acc.set_title(0, 'Bands')
acc.set_title(1, 'Properties')
acc.selected_index = None # this will unselect all
return VBox([header, acc])
elif ty == 'FeatureCollection':
try:
info = object.getInfo()
except:
print('FeatureCollection limited to 4000 features')
info = object.limit(4000)
return create_accordion(info)
else:
info = object.getInfo()
return create_accordion(info)
def create_async_output(object, widget):
child = create_object_output(object)
widget.children = [child]
# def recrusive_delete_asset_to_widget(assetId, widget):
def recrusive_delete_asset_to_widget(args):
''' adapted version to print streaming results in a widget '''
assetId = args[0]
widget = args[1]
try:
content = ee.data.getList({'id':assetId})
except Exception as e:
widget.value = str(e)
return
if content == 0:
# delete empty colletion and/or folder
ee.data.deleteAsset(assetId)
else:
for asset in content:
path = asset['id']
ty = asset['type']
if ty == 'Image':
ee.data.deleteAsset(path)
widget.value += 'deleting {} ({})</br>'.format(path, ty)
else:
# clear output
widget.value = ''
recrusive_delete_asset_to_widget(path, widget)
# delete empty colletion and/or folder
ee.data.deleteAsset(assetId)
class CheckRow(HBox):
checkbox = Instance(Checkbox)
widget = Instance(Widget)
def __init__(self, widget, **kwargs):
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.widget = widget
super(CheckRow, self).__init__(children=(self.checkbox, self.widget),
**kwargs)
self.layout = Layout(display='flex', flex_flow='row',
align_content='flex-start')
@observe('widget')
def _ob_wid(self, change):
new = change['new']
self.children = (self.checkbox, new)
def observe_checkbox(self, handler, extra_params={}, **kwargs):
""" set handler for the checkbox widget. Use the property 'widget' of
change to get the corresponding widget
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
# by default only observe value
name = kwargs.get('names', 'value')
def proxy_handler(handler):
def wrap(change):
change['widget'] = self.widget
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.checkbox.observe(proxy_handler(handler), names=name, **kwargs)
def observe_widget(self, handler, extra_params={}, **kwargs):
""" set handler for the widget alongside de checkbox
:param handler: callback function
:type handler: function
:param extra_params: extra parameters that can be passed to the handler
:type extra_params: dict
:param kwargs: parameters from traitlets.observe
:type kwargs: dict
"""
def proxy_handler(handler):
def wrap(change):
change['checkbox'] = self.checkbox
for key, val in extra_params.items():
change[key] = val
return handler(change)
return wrap
self.widget.observe(proxy_handler(handler), **kwargs)
class CheckAccordion(VBox):
widgets = Tuple()
def __init__(self, widgets, **kwargs):
# self.widgets = widgets
super(CheckAccordion, self).__init__(**kwargs)
self.widgets = widgets
@observe('widgets')
def _on_child(self, change):
new = change['new'] # list of any widget
newwidgets = []
for widget in new:
# constract the widget
acc = Accordion(children=(widget,))
acc.selected_index = None # this will unselect all
# create a CheckRow
checkrow = CheckRow(acc)
newwidgets.append(checkrow)
newchildren = tuple(newwidgets)
self.children = newchildren
def set_title(self, index, title):
''' set the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
acc.set_title(0, title)
def get_title(self, index):
''' get the title of the widget at indicated index'''
checkrow = self.children[index]
acc = checkrow.widget
return acc.get_title(0)
def get_check(self, index):
''' get the state of checkbox in index '''
checkrow = self.children[index]
return checkrow.checkbox.value
def set_check(self, index, state):
''' set the state of checkbox in index '''
checkrow = self.children[index]
checkrow.checkbox.value = state
def checked_rows(self):
''' return a list of indexes of checked rows '''
checked = []
for i, checkrow in enumerate(self.children):
state = checkrow.checkbox.value
if state: checked.append(i)
return checked
def get_widget(self, index):
''' get the widget in index '''
checkrow = self.children[index]
return checkrow.widget
def set_widget(self, index, widget):
''' set the widget for index '''
checkrow = self.children[index]
checkrow.widget.children = (widget,) # Accordion has 1 child
def set_row(self, index, title, widget):
''' set values for the row '''
self.set_title(index, title)
self.set_widget(index, widget)
def set_accordion_handler(self, index, handler, **kwargs):
''' set the handler for Accordion in index '''
checkrow = self.children[index]
checkrow.observe_widget(handler, names=['selected_index'], **kwargs)
def set_checkbox_handler(self, index, handler, **kwargs):
''' set the handler for CheckBox in index '''
checkrow = self.children[index]
checkrow.observe_checkbox(handler, **kwargs)
class AssetManager(VBox):
""" Asset Manager Widget """
POOL_SIZE = 5
def __init__(self, map=None, **kwargs):
super(AssetManager, self).__init__(**kwargs)
# Thumb height
self.thumb_height = kwargs.get('thumb_height', 300)
self.root_path = ee.data.getAssetRoots()[0]['id']
# Map
self.map = map
# Header
self.reload_button = Button(description='Reload')
self.add2map = Button(description='Add to Map')
self.delete = Button(description='Delete Selected')
header_children = [self.reload_button, self.delete]
# Add2map only if a Map has been passed
if self.map:
header_children.append(self.add2map)
self.header = HBox(header_children)
# Reload handler
def reload_handler(button):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
# add2map handler
def add2map_handler(themap):
def wrap(button):
selected_rows = self.get_selected()
for asset, ty in selected_rows.items():
if ty == 'Image':
im = ee.Image(asset)
themap.addLayer(im, {}, asset)
elif ty == 'ImageCollection':
col = ee.ImageCollection(asset)
themap.addLayer(col)
return wrap
# Set reload handler
# self.reload_button.on_click(reload_handler)
self.reload_button.on_click(self.reload)
# Set reload handler
self.add2map.on_click(add2map_handler(self.map))
# Set delete selected handler
self.delete.on_click(self.delete_selected)
# First Accordion
self.root_acc = self.core(self.root_path)
# Set VBox children
self.children = [self.header, self.root_acc]
def delete_selected(self, button=None):
''' function to delete selected assets '''
selected = self.get_selected()
# Output widget
output = HTML('')
def handle_yes(button):
self.children = [self.header, output]
pool = Pool(self.POOL_SIZE)
# pool = pp.ProcessPool(self.POOL_SIZE)
if selected:
''' OLD
for asset, ty in selected.items():
recrusive_delete_asset_to_widget(asset, output)
args = []
for asset, ty in selected.items():
args.append((asset, output))
# pool.map(recrusive_delete_asset_to_widget, args)
# pool.map(test2, args)
# pool.close()
# pool.join()
'''
assets = [ass for ass in selected.keys()]
pool.map(batch.recrusive_delete_asset, assets)
# TODO: cant map recrusive_delete_asset_to_widget because the passed widget is not pickable
pool.close()
pool.join()
# when deleting end, reload
self.reload()
def handle_no(button):
self.reload()
def handle_cancel(button):
self.reload()
assets_str = ['{} ({})'.format(ass, ty) for ass, ty in selected.items()]
assets_str = '</br>'.join(assets_str)
confirm = ConfirmationWidget('<h2>Delete {} assets</h2>'.format(len(selected.keys())),
'The following assets are going to be deleted: </br> {} </br> Are you sure?'.format(assets_str),
handle_yes=handle_yes,
handle_no=handle_no,
handle_cancel=handle_cancel)
self.children = [self.header, confirm, output]
def reload(self, button=None):
new_accordion = self.core(self.root_path)
# Set VBox children
self.children = [self.header, new_accordion]
def get_selected(self):
''' get the selected assets
:return: a dictionary with the type as key and asset root as value
:rtype: dict
'''
def wrap(checkacc, assets={}, root=self.root_path):
children = checkacc.children # list of CheckRow
for child in children:
checkbox = child.children[0] # checkbox of the CheckRow
widget = child.children[1] # widget of the CheckRow (Accordion)
state = checkbox.value
if isinstance(widget.children[0], CheckAccordion):
title = widget.get_title(0).split(' ')[0]
new_root = '{}/{}'.format(root, title)
newselection = wrap(widget.children[0], assets, new_root)
assets = newselection
else:
if state:
title = child.children[1].get_title(0)
# remove type that is between ()
ass = title.split(' ')[0]
ty = title.split(' ')[1][1:-1]
# append root
ass = '{}/{}'.format(root, ass)
# append title to selected list
# assets.append(title)
assets[ass] = ty
return assets
# get selection on root
begin = self.children[1] # CheckAccordion of root
return wrap(begin)
def core(self, path):
# Get Assets data
root_list = ee.data.getList({'id': path})
# empty lists to fill with ids, types, widgets and paths
ids = []
types = []
widgets = []
paths = []
# iterate over the list of the root
for content in root_list:
# get data
id = content['id']
ty = content['type']
# append data to lists
paths.append(id)
ids.append(id.replace(path+'/', ''))
types.append(ty)
wid = HTML('Loading..')
widgets.append(wid)
# super(AssetManager, self).__init__(widgets=widgets, **kwargs)
# self.widgets = widgets
asset_acc = CheckAccordion(widgets=widgets)
# TODO: set handler for title's checkbox: select all checkboxes
# set titles
for i, (title, ty) in enumerate(zip(ids, types)):
final_title = '{title} ({type})'.format(title=title, type=ty)
asset_acc.set_title(i, final_title)
def handle_new_accordion(change):
path = change['path']
index = change['index']
ty = change['type']
if ty == 'Folder' or ty == 'ImageCollection':
wid = self.core(path)
else:
image = ee.Image(path)
info = image.getInfo()
width = int(info['bands'][0]['dimensions'][0])
height = int(info['bands'][0]['dimensions'][1])
new_width = int(self.thumb_height)/height*width
thumb = image.getThumbURL({'dimensions':[new_width, self.thumb_height]})
# wid = ImageWid(value=thumb)
wid_i = HTML('<img src={}>'.format(thumb))
wid_info = create_accordion(info)
wid = HBox(children=[wid_i, wid_info])
asset_acc.set_widget(index, wid)
def handle_checkbox(change):
path = change['path']
widget = change['widget'] # Accordion
wid_children = widget.children[0] # can be a HTML or CheckAccordion
new = change['new']
if isinstance(wid_children, CheckAccordion): # set all checkboxes to True
for child in wid_children.children:
check = child.children[0]
check.value = new
# set handlers
for i, (path, ty) in enumerate(zip(paths, types)):
asset_acc.set_accordion_handler(
i, handle_new_accordion,
extra_params={'path':path, 'index':i, 'type': ty}
)
asset_acc.set_checkbox_handler(
i, handle_checkbox,
extra_params={'path':path, 'index':i, 'type': ty}
)
return asset_acc
class TaskManager(VBox):
def __init__(self, **kwargs):
super(TaskManager, self).__init__(**kwargs)
# Header
self.checkbox = Checkbox(indent=False,
layout=Layout(flex='1 1 20', width='auto'))
self.cancel_selected = Button(description='Cancel Selected',
tooltip='Cancel all selected tasks')
self.cancel_all = Button(description='Cancell All',
tooltip='Cancel all tasks')
self.refresh = Button(description='Refresh',
tooltip='Refresh Tasks List')
self.autorefresh = ToggleButton(description='auto-refresh',
tooltip='click to enable/disable autorefresh')
self.slider = IntSlider(min=1, max=10, step=1, value=5)
self.hbox = HBox([self.checkbox, self.refresh,
self.cancel_selected, self.cancel_all,
self.autorefresh, self.slider])
# Tabs for COMPLETED, FAILED, etc
self.tabs = Tab()
# Tabs index
self.tab_index = {0: 'RUNNING',
1: 'COMPLETED',
2: 'FAILED',
3: 'CANCELED',
4: 'UNKNOWN'}
self.taskVBox = VBox()
self.runningVBox = VBox()
self.completedVBox = VBox()
self.failedVBox = VBox()
self.canceledVBox = VBox()
self.unknownVBox = VBox()
self.tab_widgets_rel = {'RUNNING': self.runningVBox,
'COMPLETED': self.completedVBox,
'FAILED': self.failedVBox,
'CANCELED': self.canceledVBox,
'UNKNOWN': self.unknownVBox}
# Create Tabs
self.tab_widgets = []
for key, val in self.tab_index.items():
widget = self.tab_widgets_rel[val]
self.tab_widgets.append(widget)
self.tabs.children = self.tab_widgets
self.tabs.set_title(key, val)
''' autorefresh
def update_task_list(widget):
# widget is a VBox
tasklist = ee.data.getTaskList()
widlist = []
for task in tasklist:
accordion = create_accordion(task)
if task.has_key('description'):
name = '{} ({})'.format(task['description'], task['state'])
else:
name = '{} ({})'.format(task['output_url'][0].split('/')[-1], task['state'])
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
wid = CheckRow(mainacc)
#wid = CheckRow(accordion)
widlist.append(wid)
widget.children = tuple(widlist)
'''
def loop(widget):
while True:
self.update_task_list()(self.refresh)
time.sleep(self.slider.value)
# First widget
self.update_task_list(vbox=self.runningVBox)(self.refresh)
# self.children = (self.hbox, self.taskVBox)
self.children = (self.hbox, self.tabs)
# Set on_click for refresh button
self.refresh.on_click(self.update_task_list(vbox=self.selected_tab()))
''' autorefresh
thread = threading.Thread(target=loop, args=(self.taskVBox,))
thread.start()
'''
# Set on_clicks
self.cancel_all.on_click(self.cancel_all_click)
self.cancel_selected.on_click(self.cancel_selected_click)
# self.autorefresh
def autorefresh_loop(self):
pass
def tab_handler(self, change):
if change['name'] == 'selected_index':
self.update_task_list()(self.refresh)
def selected_tab(self):
''' get the selected tab '''
index = self.tabs.selected_index
tab_name = self.tab_index[index]
return self.tab_widgets_rel[tab_name]
def update_task_list(self, **kwargs):
def wrap(button):
self.selected_tab().children = (HTML('Loading...'),)
try:
tasklist = ee.data.getTaskList()
# empty lists
running_list = []
completed_list = []
failed_list = []
canceled_list = []
unknown_list = []
all_list = {'RUNNING': running_list, 'COMPLETED': completed_list,
'FAILED': failed_list, 'CANCELED': canceled_list,
'UNKNOWN': unknown_list}
for task in tasklist:
state = task['state']
accordion = create_accordion(task)
if task['state'] == 'COMPLETED':
start = int(task['start_timestamp_ms'])
end = int(task['creation_timestamp_ms'])
seconds = float((start-end))/1000
name = '{} ({} sec)'.format(task['output_url'][0].split('/')[-1],
seconds)
else:
name = '{}'.format(task['description'])
# Accordion for CheckRow widget
mainacc = Accordion(children=(accordion, ))
mainacc.set_title(0, name)
mainacc.selected_index = None
# CheckRow
wid = CheckRow(mainacc)
# Append widget to the CORRECT list
all_list[state].append(wid)
# Assign Children
self.runningVBox.children = tuple(running_list)
self.completedVBox.children = tuple(completed_list)
self.failedVBox.children = tuple(failed_list)
self.canceledVBox.children = tuple(canceled_list)
self.unknownVBox.children = tuple(unknown_list)
except Exception as e:
self.selected_tab().children = (HTML(str(e)),)
return wrap
def get_selected(self):
""" Get selected Tasks
:return: a list of the selected indexes
"""
selected = []
children = self.selected_tab().children
for i, child in enumerate(children):
# checkrow = child.children[0] # child is an accordion
state = child.checkbox.value
if state: selected.append(i)
return selected
def get_taskid(self, index):
# Get selected Tab
selected_wid = self.selected_tab() # VBox
# Children of the Tab's VBox
children = selected_wid.children
# Get CheckRow that corresponds to the passed index
checkrow = children[index]
# Get main accordion
mainacc = checkrow.widget
# Get details accordion
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
return child.value
def get_selected_taskid(self):
selected = self.get_selected()
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
taskid_list = []
for select in selected:
'''
checkrow = children[select]
mainacc = checkrow.widget
selectedacc = mainacc.children[0]
for n, child in enumerate(selectedacc.children):
title = selectedacc.get_title(n)
if title == 'id':
taskid_list.append(child.value)
'''
taskid = self.get_taskid(select)
taskid_list.append(taskid)
return taskid_list
def cancel_selected_click(self, button):
selected = self.get_selected_taskid()
for taskid in selected:
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
def cancel_all_click(self, button):
selected_wid = self.selected_tab() # VBox
children = selected_wid.children
for n, child in enumerate(children):
taskid = self.get_taskid(n)
try:
ee.data.cancelTask(taskid)
except:
continue
self.update_task_list()(self.refresh)
class ConfirmationWidget(VBox):
def __init__(self, title='Confirmation', legend='Are you sure?',
handle_yes=None, handle_no=None, handle_cancel=None, **kwargs):
super(ConfirmationWidget, self).__init__(**kwargs)
# Title Widget
self.title = title
self.title_widget = HTML(self.title)
# Legend Widget
self.legend = legend
self.legend_widget = HTML(self.legend)
# Buttons
self.yes = Button(description='Yes')
handler_yes = handle_yes if handle_yes else lambda x: x
self.yes.on_click(handler_yes)
self.no = Button(description='No')
handler_no = handle_no if handle_no else lambda x: x
self.no.on_click(handler_no)
self.cancel = Button(description='Cancel')
handler_cancel = handle_cancel if handle_cancel else lambda x: x
self.cancel.on_click(handler_cancel)
self.buttons = HBox([self.yes, self.no, self.cancel])
self.children = [self.title_widget, self.legend_widget, self.buttons]
class RealBox(Box):
""" Real Box Layout
items:
[[widget1, widget2],
[widget3, widget4]]
"""
items = List()
width = Int()
border_inside = Unicode()
border_outside = Unicode()
def __init__(self, **kwargs):
super(RealBox, self).__init__(**kwargs)
self.layout = Layout(display='flex', flex_flow='column',
border=self.border_outside)
def max_row_elements(self):
maxn = 0
for el in self.items:
n = len(el)
if n>maxn:
maxn = n
return maxn
@observe('items')
def _ob_items(self, change):
layout_columns = Layout(display='flex', flex_flow='row')
new = change['new']
children = []
# recompute size
maxn = self.max_row_elements()
width = 100/maxn
for el in new:
for wid in el:
if not wid.layout.width:
if self.width:
wid.layout = Layout(width='{}px'.format(self.width),
border=self.border_inside)
else:
wid.layout = Layout(width='{}%'.format(width),
border=self.border_inside)
hbox = Box(el, layout=layout_columns)
children.append(hbox)
self.children = children
class FloatBandWidget(HBox):
min = Float(0)
max = Float(1)
def __init__(self, **kwargs):
super(FloatBandWidget, self).__init__(**kwargs)
self.minWid = FloatText(value=self.min, description='min')
self.maxWid = FloatText(value=self.max, description='max')
self.children = [self.minWid, self.maxWid]
self.observe(self._ob_min, names=['min'])
self.observe(self._ob_max, names=['max'])
def _ob_min(self, change):
new = change['new']
self.minWid.value = new
def _ob_max(self, change):
new = change['new']
self.maxWid.value = new
|
[
"traitlets.Int",
"traitlets.Float",
"ipywidgets.Box",
"ipywidgets.ToggleButton",
"traitlets.List",
"ee.data.cancelTask",
"ipywidgets.Button",
"ipywidgets.Tab",
"ipywidgets.Accordion",
"ipywidgets.Layout",
"ee.Initialize",
"traitlets.Instance",
"ipywidgets.HTML",
"ee.data.getList",
"ipywidgets.IntSlider",
"time.sleep",
"ipywidgets.HBox",
"traitlets.Tuple",
"multiprocessing.Pool",
"ipywidgets.VBox",
"ipywidgets.FloatText",
"ee.data.getAssetRoots",
"ee.ImageCollection",
"ee.Image",
"traitlets.Unicode",
"ee.data.deleteAsset",
"ee.data.getTaskList",
"traitlets.observe"
] |
[((430, 445), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (443, 445), False, 'import ee\n'), ((582, 593), 'ipywidgets.Accordion', 'Accordion', ([], {}), '()\n', (591, 593), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((3778, 3796), 'traitlets.Instance', 'Instance', (['Checkbox'], {}), '(Checkbox)\n', (3786, 3796), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((3810, 3826), 'traitlets.Instance', 'Instance', (['Widget'], {}), '(Widget)\n', (3818, 3826), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((4274, 4291), 'traitlets.observe', 'observe', (['"""widget"""'], {}), "('widget')\n", (4281, 4291), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((6093, 6100), 'traitlets.Tuple', 'Tuple', ([], {}), '()\n', (6098, 6100), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((6270, 6288), 'traitlets.observe', 'observe', (['"""widgets"""'], {}), "('widgets')\n", (6277, 6288), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((26768, 26774), 'traitlets.List', 'List', ([], {}), '()\n', (26772, 26774), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((26787, 26792), 'traitlets.Int', 'Int', ([], {}), '()\n', (26790, 26792), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((26813, 26822), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (26820, 26822), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((26844, 26853), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (26851, 26853), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((27238, 27254), 'traitlets.observe', 'observe', (['"""items"""'], {}), "('items')\n", (27245, 27254), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((28092, 28100), 'traitlets.Float', 'Float', (['(0)'], {}), '(0)\n', (28097, 28100), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((28111, 28119), 'traitlets.Float', 'Float', (['(1)'], {}), '(1)\n', (28116, 28119), False, 'from traitlets import List, Unicode, observe, Instance, Tuple, Int, Float\n'), ((2175, 2207), 'ipywidgets.Accordion', 'Accordion', (['[bands_wid, prop_wid]'], {}), '([bands_wid, prop_wid])\n', (2184, 2207), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((2356, 2375), 'ipywidgets.VBox', 'VBox', (['[header, acc]'], {}), '([header, acc])\n', (2360, 2375), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((3052, 3084), 'ee.data.getList', 'ee.data.getList', (["{'id': assetId}"], {}), "({'id': assetId})\n", (3067, 3084), False, 'import ee\n'), ((3233, 3261), 'ee.data.deleteAsset', 'ee.data.deleteAsset', (['assetId'], {}), '(assetId)\n', (3252, 3261), False, 'import ee\n'), ((3710, 3738), 'ee.data.deleteAsset', 'ee.data.deleteAsset', (['assetId'], {}), '(assetId)\n', (3729, 3738), False, 'import ee\n'), ((4171, 4238), 'ipywidgets.Layout', 'Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""row"""', 'align_content': '"""flex-start"""'}), "(display='flex', flex_flow='row', align_content='flex-start')\n", (4177, 4238), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((9102, 9130), 'ipywidgets.Button', 'Button', ([], {'description': '"""Reload"""'}), "(description='Reload')\n", (9108, 9130), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((9154, 9186), 'ipywidgets.Button', 'Button', ([], {'description': '"""Add to Map"""'}), "(description='Add to Map')\n", (9160, 9186), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((9209, 9246), 'ipywidgets.Button', 'Button', ([], {'description': '"""Delete Selected"""'}), "(description='Delete Selected')\n", (9215, 9246), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((9449, 9470), 'ipywidgets.HBox', 'HBox', (['header_children'], {}), '(header_children)\n', (9453, 9470), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((10837, 10845), 'ipywidgets.HTML', 'HTML', (['""""""'], {}), "('')\n", (10841, 10845), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((14321, 14350), 'ee.data.getList', 'ee.data.getList', (["{'id': path}"], {}), "({'id': path})\n", (14336, 14350), False, 'import ee\n'), ((17355, 17429), 'ipywidgets.Button', 'Button', ([], {'description': '"""Cancel Selected"""', 'tooltip': '"""Cancel all selected tasks"""'}), "(description='Cancel Selected', tooltip='Cancel all selected tasks')\n", (17361, 17429), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17494, 17555), 'ipywidgets.Button', 'Button', ([], {'description': '"""Cancell All"""', 'tooltip': '"""Cancel all tasks"""'}), "(description='Cancell All', tooltip='Cancel all tasks')\n", (17500, 17555), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17612, 17671), 'ipywidgets.Button', 'Button', ([], {'description': '"""Refresh"""', 'tooltip': '"""Refresh Tasks List"""'}), "(description='Refresh', tooltip='Refresh Tasks List')\n", (17618, 17671), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17729, 17821), 'ipywidgets.ToggleButton', 'ToggleButton', ([], {'description': '"""auto-refresh"""', 'tooltip': '"""click to enable/disable autorefresh"""'}), "(description='auto-refresh', tooltip=\n 'click to enable/disable autorefresh')\n", (17741, 17821), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17879, 17920), 'ipywidgets.IntSlider', 'IntSlider', ([], {'min': '(1)', 'max': '(10)', 'step': '(1)', 'value': '(5)'}), '(min=1, max=10, step=1, value=5)\n', (17888, 17920), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17941, 18050), 'ipywidgets.HBox', 'HBox', (['[self.checkbox, self.refresh, self.cancel_selected, self.cancel_all, self.\n autorefresh, self.slider]'], {}), '([self.checkbox, self.refresh, self.cancel_selected, self.cancel_all,\n self.autorefresh, self.slider])\n', (17945, 18050), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18162, 18167), 'ipywidgets.Tab', 'Tab', ([], {}), '()\n', (18165, 18167), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18416, 18422), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18420, 18422), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18451, 18457), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18455, 18457), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18487, 18493), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18491, 18493), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18520, 18526), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18524, 18526), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18555, 18561), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18559, 18561), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((18589, 18595), 'ipywidgets.VBox', 'VBox', ([], {}), '()\n', (18593, 18595), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((25902, 25918), 'ipywidgets.HTML', 'HTML', (['self.title'], {}), '(self.title)\n', (25906, 25918), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26001, 26018), 'ipywidgets.HTML', 'HTML', (['self.legend'], {}), '(self.legend)\n', (26005, 26018), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26056, 26081), 'ipywidgets.Button', 'Button', ([], {'description': '"""Yes"""'}), "(description='Yes')\n", (26062, 26081), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26204, 26228), 'ipywidgets.Button', 'Button', ([], {'description': '"""No"""'}), "(description='No')\n", (26210, 26228), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26350, 26378), 'ipywidgets.Button', 'Button', ([], {'description': '"""Cancel"""'}), "(description='Cancel')\n", (26356, 26378), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26521, 26559), 'ipywidgets.HBox', 'HBox', (['[self.yes, self.no, self.cancel]'], {}), '([self.yes, self.no, self.cancel])\n', (26525, 26559), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((26960, 27030), 'ipywidgets.Layout', 'Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""column"""', 'border': 'self.border_outside'}), "(display='flex', flex_flow='column', border=self.border_outside)\n", (26966, 27030), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((27313, 27352), 'ipywidgets.Layout', 'Layout', ([], {'display': '"""flex"""', 'flex_flow': '"""row"""'}), "(display='flex', flex_flow='row')\n", (27319, 27352), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((28233, 28277), 'ipywidgets.FloatText', 'FloatText', ([], {'value': 'self.min', 'description': '"""min"""'}), "(value=self.min, description='min')\n", (28242, 28277), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((28300, 28344), 'ipywidgets.FloatText', 'FloatText', ([], {'value': 'self.max', 'description': '"""max"""'}), "(value=self.max, description='max')\n", (28309, 28344), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((6475, 6504), 'ipywidgets.Accordion', 'Accordion', ([], {'children': '(widget,)'}), '(children=(widget,))\n', (6484, 6504), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((10948, 10968), 'multiprocessing.Pool', 'Pool', (['self.POOL_SIZE'], {}), '(self.POOL_SIZE)\n', (10952, 10968), False, 'from multiprocessing import Pool\n'), ((14819, 14836), 'ipywidgets.HTML', 'HTML', (['"""Loading.."""'], {}), "('Loading..')\n", (14823, 14836), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((27953, 27983), 'ipywidgets.Box', 'Box', (['el'], {'layout': 'layout_columns'}), '(el, layout=layout_columns)\n', (27956, 27983), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((3410, 3435), 'ee.data.deleteAsset', 'ee.data.deleteAsset', (['path'], {}), '(path)\n', (3429, 3435), False, 'import ee\n'), ((3956, 3991), 'ipywidgets.Layout', 'Layout', ([], {'flex': '"""1 1 20"""', 'width': '"""auto"""'}), "(flex='1 1 20', width='auto')\n", (3962, 3991), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((8984, 9007), 'ee.data.getAssetRoots', 'ee.data.getAssetRoots', ([], {}), '()\n', (9005, 9007), False, 'import ee\n'), ((15585, 15599), 'ee.Image', 'ee.Image', (['path'], {}), '(path)\n', (15593, 15599), False, 'import ee\n'), ((16099, 16131), 'ipywidgets.HBox', 'HBox', ([], {'children': '[wid_i, wid_info]'}), '(children=[wid_i, wid_info])\n', (16103, 16131), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((17287, 17322), 'ipywidgets.Layout', 'Layout', ([], {'flex': '"""1 1 20"""', 'width': '"""auto"""'}), "(flex='1 1 20', width='auto')\n", (17293, 17322), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((20132, 20161), 'time.sleep', 'time.sleep', (['self.slider.value'], {}), '(self.slider.value)\n', (20142, 20161), False, 'import time\n'), ((21263, 21281), 'ipywidgets.HTML', 'HTML', (['"""Loading..."""'], {}), "('Loading...')\n", (21267, 21281), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((21328, 21349), 'ee.data.getTaskList', 'ee.data.getTaskList', ([], {}), '()\n', (21347, 21349), False, 'import ee\n'), ((25095, 25121), 'ee.data.cancelTask', 'ee.data.cancelTask', (['taskid'], {}), '(taskid)\n', (25113, 25121), False, 'import ee\n'), ((25463, 25489), 'ee.data.cancelTask', 'ee.data.cancelTask', (['taskid'], {}), '(taskid)\n', (25481, 25489), False, 'import ee\n'), ((22468, 22500), 'ipywidgets.Accordion', 'Accordion', ([], {'children': '(accordion,)'}), '(children=(accordion,))\n', (22477, 22500), False, 'from ipywidgets import HTML, Tab, Accordion, Checkbox, HBox, Layout, Widget, VBox, Button, Box, ToggleButton, IntSlider, FloatText\n'), ((9945, 9960), 'ee.Image', 'ee.Image', (['asset'], {}), '(asset)\n', (9953, 9960), False, 'import ee\n'), ((10096, 10121), 'ee.ImageCollection', 'ee.ImageCollection', (['asset'], {}), '(asset)\n', (10114, 10121), False, 'import ee\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.