repo_name
stringlengths 7
94
| repo_path
stringlengths 4
237
| repo_head_hexsha
stringlengths 40
40
| content
stringlengths 10
680k
| apis
stringlengths 2
680k
|
---|---|---|---|---|
jmarine/ezeeai | ezeeai/utils/hooks.py | 091b4ce3bc5794c534084bff3301b15ba8a9be1a | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export
import smtplib
from email.mime.text import MIMEText
@tf_export("train.EmailAtStepHook")
class EmailAtStepHook(session_run_hook.SessionRunHook):
    """Session hook that sends a notification e-mail every N steps or
    seconds, and/or once when training ends.

    Args:
        user_info: dict with the recipient's 'email_address'.
        server_info: dict with the sender's 'email_address', 'login' and
            'password' for the SMTP server.
        every_n_iter: send an e-mail every N steps (mutually exclusive
            with every_n_secs).
        every_n_secs: send an e-mail every N seconds.
        at_end: also send an e-mail from end().

    Raises:
        ValueError: unless at_end and/or exactly one of every_n_iter /
            every_n_secs is provided, or if every_n_iter is non-positive.
    """

    def __init__(self, user_info, server_info, every_n_iter=None, every_n_secs=None,
                 at_end=False):
        only_log_at_end = (
            at_end and (every_n_iter is None) and (every_n_secs is None))
        if (not only_log_at_end and
                (every_n_iter is None) == (every_n_secs is None)):
            raise ValueError(
                "either at_end and/or exactly one of every_n_iter and every_n_secs "
                "must be provided.")
        if every_n_iter is not None and every_n_iter <= 0:
            raise ValueError("invalid every_n_iter=%s." % every_n_iter)
        self._timer = (
            NeverTriggerTimer() if only_log_at_end else
            SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
        self._log_at_end = at_end
        self._user_info = user_info
        self._server_info = server_info
        self._timer.reset()
        self._iter_count = 0

    def begin(self):
        pass

    def before_run(self, run_context):  # pylint: disable=unused-argument
        self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)

    def after_run(self, run_context, run_values):
        _ = run_context
        if self._should_trigger:
            self._send_email()
            # BUG FIX: without this, the timer keeps reporting "should
            # trigger" on every subsequent step, so an e-mail would be
            # sent on each iteration after the first trigger.
            self._timer.update_last_triggered_step(self._iter_count)
        self._iter_count += 1

    def end(self, session):
        if self._log_at_end:
            self._send_email()

    def _send_email(self):
        """Send the notification e-mail over SMTP with STARTTLS."""
        smtpserver = 'smtp.gmail.com:587'
        # BUG FIX: the original glued all headers and the body into one
        # line; RFC 5322 requires CRLF between header fields and a blank
        # line separating headers from the body.
        header = 'From: %s\r\n' % self._server_info['email_address']
        header += 'To: %s\r\n' % self._user_info['email_address']
        header += 'Subject: %s\r\n' % "Training finished"
        message = header + "\r\n" + "Training finished"
        server = smtplib.SMTP(smtpserver)
        try:
            server.starttls()
            server.login(self._server_info['login'], self._server_info['password'])
            server.sendmail(self._server_info['email_address'],
                            self._user_info['email_address'], message)
        finally:
            # Always release the connection, even if login/send fails.
            server.quit()
| [((448, 482), 'tensorflow.python.util.tf_export.tf_export', 'tf_export', (['"""train.EmailAtStepHook"""'], {}), "('train.EmailAtStepHook')\n", (457, 482), False, 'from tensorflow.python.util.tf_export import tf_export\n'), ((2249, 2273), 'smtplib.SMTP', 'smtplib.SMTP', (['smtpserver'], {}), '(smtpserver)\n', (2261, 2273), False, 'import smtplib\n'), ((1186, 1205), 'tensorflow.python.training.basic_session_run_hooks.NeverTriggerTimer', 'NeverTriggerTimer', ([], {}), '()\n', (1203, 1205), False, 'from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer\n'), ((1242, 1310), 'tensorflow.python.training.basic_session_run_hooks.SecondOrStepTimer', 'SecondOrStepTimer', ([], {'every_secs': 'every_n_secs', 'every_steps': 'every_n_iter'}), '(every_secs=every_n_secs, every_steps=every_n_iter)\n', (1259, 1310), False, 'from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer\n')] |
whiletrace/dwellinglybackend | tests/factory_fixtures/dummy_resource.py | e766b3d612b4c92fd337b82498ab8ef68bd95e1f | from flask import request
from flask_restful import Resource
from utils.gatekeeper import allowed_params
class DummyResource(Resource):
    """Minimal flask-restful resource used in tests to exercise the
    ``allowed_params`` gatekeeper decorator."""
    # Empty whitelist: no request parameters are allowed through.
    dummy_params = set()
    @allowed_params(dummy_params)
    def put(self):
        # Echo the JSON request body back so tests can inspect what the
        # decorator let through.
        return request.json
| [((169, 197), 'utils.gatekeeper.allowed_params', 'allowed_params', (['dummy_params'], {}), '(dummy_params)\n', (183, 197), False, 'from utils.gatekeeper import allowed_params\n')] |
JiniousChoi/encyclopedia-in-code | quizzes/00.organize.me/hackerrank/sorted_set/server2.py | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | #!/usr/bin/env python3
import socket, threading
from queue import Queue
import sys, struct
# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket";
class Result:
    """One-shot future: a value produced by a worker thread that another
    thread can block on via :meth:`result`."""

    def __init__(self):
        self._done = threading.Event()
        self._value = None

    def set_result(self, value):
        """Store *value* and wake up any thread waiting in result()."""
        self._value = value
        self._done.set()

    def result(self):
        """Block until set_result() has been called, then return the value."""
        self._done.wait()
        return self._value
class ActorExit(Exception):
    # Sentinel exception: sending the class itself through an Actor's
    # mailbox makes recv() raise it, unwinding the actor's run loop.
    pass
class Actor(object):
    """Base class for a mailbox-driven actor running on its own daemon
    thread.  Subclasses override run() and pull messages with recv()."""
    def __init__(self):
        # Thread-safe FIFO of incoming messages.
        self._mailbox = Queue()
    def send(self, msg):
        # Enqueue a message for the actor thread.
        self._mailbox.put(msg)
    def recv(self):
        # Block for the next message; the ActorExit sentinel terminates
        # the actor by raising inside its run loop.
        msg = self._mailbox.get()
        if msg is ActorExit:
            raise ActorExit()
        return msg
    def close(self):
        # Request shutdown by mailing the ActorExit sentinel class.
        self.send(ActorExit)
    def start(self):
        # Launch the actor's loop on a daemon thread; _terminated lets
        # join() wait for a clean exit.
        self._terminated = threading.Event()
        t = threading.Thread(target=self._bootstrap)
        t.daemon = True
        t.start()
    def _bootstrap(self):
        # Thread entry point: run until ActorExit, always flag termination.
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()
    def join(self):
        # Block until the actor thread has finished (requires start()).
        self._terminated.wait()
    def run(self):
        # Default loop: drain messages and discard them; subclasses
        # override this with useful behavior.
        while True:
            msg = self.recv()
class Worker(Actor):
    """Single-threaded database actor for the sorted-set protocol.

    All commands are serialized through the actor mailbox, so ``db``
    (a dict of set-name -> {key: score}) needs no locking.
    """
    def __init__(self):
        super().__init__()
        # set id -> {key: score}
        self.db = {}
    def submit(self, values):
        # Enqueue a decoded command and return a Result the caller can
        # block on for the reply.
        r = Result()
        self.send((values, r))
        return r
    def run(self):
        # Actor loop: execute each command and publish its reply.
        while True:
            values, r = self.recv()
            r.set_result(self.execute(values))
    def execute(self, values):
        # Dispatch one command; replies are lists of integers
        # (wire format), or None to signal disconnect.
        cmd, *opts = values
        print('[*]', cmd, opts)
        if cmd == 1: #add
            s, k, v = opts
            self.db.setdefault(s, {})
            self.db[s][k] = v
            return [0]
        elif cmd == 2: #remove
            # Removing a missing key is a silent no-op; still replies [0].
            s, k = opts
            if s in self.db and k in self.db[s]:
                self.db[s].pop(k)
            return [0]
        elif cmd == 3: #get size
            s = opts[0]
            size = len(self.db[s]) if s in self.db else 0
            return [1, size]
        elif cmd == 4: #get value
            # Missing key or set reports score 0.
            s, k = opts
            if s in self.db and k in self.db[s]:
                score = self.db[s][k]
            else:
                score = 0
            return [1, score]
        elif cmd == 5: #range
            # opts = set ids..., separator, lower bound, upper bound;
            # returns sorted (key, score) pairs flattened onto the wire.
            *sets, _, lower, upper = opts
            res = []
            for s in sets:
                if s not in self.db:
                    continue
                for k,v in self.db[s].items():
                    if lower <= v <= upper:
                        res.append((k,v))
            res.sort()
            return [len(res)*2] + [e for kv in res for e in kv]
        elif cmd == 6: #disconnect
            # None tells process_client_connection to close the socket.
            return None
        else:
            raise Exception("Not supported CMD(%s)" % (cmd))
# Wire format: one unsigned 32-bit integer, network (big-endian) byte order.
FMT = "!L"

def read_number_from_socket(connection):
    """Read one unsigned 32-bit big-endian integer from *connection*.

    BUG FIX: a single ``recv(4)`` may legally return fewer than 4 bytes
    (short read), which made the original ``struct.unpack`` crash with
    ``struct.error``.  Loop until all 4 bytes have arrived.

    Raises:
        EOFError: if the peer closes the socket mid-number.
    """
    buf = b""
    while len(buf) < 4:
        chunk = connection.recv(4 - len(buf))
        if not chunk:
            raise EOFError("socket closed while reading a number")
        buf += chunk
    return struct.unpack(FMT, buf)[0]

def write_number_to_socket(connection, number):
    """Write *number* as an unsigned 32-bit big-endian integer.

    Uses ``sendall`` instead of ``send``: ``send`` may perform a short
    write and silently corrupt the stream.
    """
    connection.sendall(struct.pack(FMT, number))
def process_client_connection(connection, worker):
    """Serve one client: decode length-prefixed integer commands, run them
    on the shared worker actor, and stream back each reply.

    Wire protocol: the client sends a count, then that many integers;
    the reply is a flat sequence of integers.  A None result from the
    worker (disconnect command) ends the session.
    """
    while True:
        # First number is how many values make up this command.
        value_num = read_number_from_socket(connection)
        values = []
        for _ in range(value_num):
            values.append(read_number_from_socket(connection))
        # Blocks until the single-threaded worker has produced the reply.
        res = worker.submit(values)
        if res.result() == None:
            break
        for num in res.result():
            write_number_to_socket(connection, num)
    connection.close()
def main():
    """Run the sorted-set server on a Unix domain socket.

    One Worker actor owns the database; each accepted client gets its own
    handler thread that funnels commands through the worker's mailbox.
    """
    worker = Worker()
    worker.start()
    s = socket.socket(socket.AF_UNIX)
    s.bind(SERVER_SOCKET_PATH)
    s.listen(1)
    # NOTE: the accept loop never exits, so the cleanup below is
    # unreachable in normal operation (worker.close() is also disabled).
    while True:
        cl, addr = s.accept()
        t = threading.Thread(target = process_client_connection, args=(cl, worker))
        t.start()
    #worker.close()
    s.close()
if __name__ == '__main__':
    main()
| [((3692, 3721), 'socket.socket', 'socket.socket', (['socket.AF_UNIX'], {}), '(socket.AF_UNIX)\n', (3705, 3721), False, 'import socket, threading\n'), ((239, 256), 'threading.Event', 'threading.Event', ([], {}), '()\n', (254, 256), False, 'import socket, threading\n'), ((564, 571), 'queue.Queue', 'Queue', ([], {}), '()\n', (569, 571), False, 'from queue import Queue\n'), ((886, 903), 'threading.Event', 'threading.Event', ([], {}), '()\n', (901, 903), False, 'import socket, threading\n'), ((916, 956), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._bootstrap'}), '(target=self._bootstrap)\n', (932, 956), False, 'import socket, threading\n'), ((3114, 3138), 'struct.pack', 'struct.pack', (['FMT', 'number'], {}), '(FMT, number)\n', (3125, 3138), False, 'import sys, struct\n'), ((3827, 3896), 'threading.Thread', 'threading.Thread', ([], {'target': 'process_client_connection', 'args': '(cl, worker)'}), '(target=process_client_connection, args=(cl, worker))\n', (3843, 3896), False, 'import socket, threading\n')] |
funrunskypalace/vnpy | vnpy/gateway/rohon/__init__.py | 2d87aede685fa46278d8d3392432cc127b797926 | from .rohon_gateway import RohonGateway
| [] |
dndtools2/dndtools2 | dnd/mobile/urls.py | 6bd794349b84f3018dd0bd12712535924557c166 | from django.conf.urls import patterns, url, include
from .views import force_desktop_version, return_to_mobile_version
# Namespace used by {% url 'mobile:...' %} reversing.
app_name = 'mobile'
# Mobile-site URL table: each feature area delegates to its own urls module.
urlpatterns = [
    # force desktop
    url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'),
    # return to mobile version
    url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'),
    # index (r'^' is an un-anchored prefix: the include handles the rest)
    url(r'^', include('dnd.mobile.index.urls')),
    # character classes
    url(r'^classes/', include('dnd.mobile.character_classes.urls')),
    # feats
    url(r'^feats/', include('dnd.mobile.feats.urls')),
    # items
    url(r'^items/', include('dnd.mobile.items.urls')),
    # languages
    url(r'^languages/', include('dnd.mobile.languages.urls')),
    # monsters
    url(r'^monsters/', include('dnd.mobile.monsters.urls')),
    # races
    url(r'^races/', include('dnd.mobile.races.urls')),
    # rulebooks
    url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')),
    # rules
    url(r'^rules/', include('dnd.mobile.rules.urls')),
    # skills
    url(r'^skills/', include('dnd.mobile.skills.urls')),
    # spells
    url(r'^spells/', include('dnd.mobile.spells.urls')),
    # deities
    url(r'^deities/', include('dnd.mobile.deities.urls')),
]
| [((181, 270), 'django.conf.urls.url', 'url', (['"""^force-desktop-version/$"""', 'force_desktop_version'], {'name': '"""force_desktop_version"""'}), "('^force-desktop-version/$', force_desktop_version, name=\n 'force_desktop_version')\n", (184, 270), False, 'from django.conf.urls import patterns, url, include\n'), ((304, 402), 'django.conf.urls.url', 'url', (['"""^return-to-mobile-version/$"""', 'return_to_mobile_version'], {'name': '"""return_to_mobile_version"""'}), "('^return-to-mobile-version/$', return_to_mobile_version, name=\n 'return_to_mobile_version')\n", (307, 402), False, 'from django.conf.urls import patterns, url, include\n'), ((427, 459), 'django.conf.urls.include', 'include', (['"""dnd.mobile.index.urls"""'], {}), "('dnd.mobile.index.urls')\n", (434, 459), False, 'from django.conf.urls import patterns, url, include\n'), ((509, 553), 'django.conf.urls.include', 'include', (['"""dnd.mobile.character_classes.urls"""'], {}), "('dnd.mobile.character_classes.urls')\n", (516, 553), False, 'from django.conf.urls import patterns, url, include\n'), ((589, 621), 'django.conf.urls.include', 'include', (['"""dnd.mobile.feats.urls"""'], {}), "('dnd.mobile.feats.urls')\n", (596, 621), False, 'from django.conf.urls import patterns, url, include\n'), ((657, 689), 'django.conf.urls.include', 'include', (['"""dnd.mobile.items.urls"""'], {}), "('dnd.mobile.items.urls')\n", (664, 689), False, 'from django.conf.urls import patterns, url, include\n'), ((733, 769), 'django.conf.urls.include', 'include', (['"""dnd.mobile.languages.urls"""'], {}), "('dnd.mobile.languages.urls')\n", (740, 769), False, 'from django.conf.urls import patterns, url, include\n'), ((811, 846), 'django.conf.urls.include', 'include', (['"""dnd.mobile.monsters.urls"""'], {}), "('dnd.mobile.monsters.urls')\n", (818, 846), False, 'from django.conf.urls import patterns, url, include\n'), ((882, 914), 'django.conf.urls.include', 'include', (['"""dnd.mobile.races.urls"""'], {}), 
"('dnd.mobile.races.urls')\n", (889, 914), False, 'from django.conf.urls import patterns, url, include\n'), ((958, 994), 'django.conf.urls.include', 'include', (['"""dnd.mobile.rulebooks.urls"""'], {}), "('dnd.mobile.rulebooks.urls')\n", (965, 994), False, 'from django.conf.urls import patterns, url, include\n'), ((1030, 1062), 'django.conf.urls.include', 'include', (['"""dnd.mobile.rules.urls"""'], {}), "('dnd.mobile.rules.urls')\n", (1037, 1062), False, 'from django.conf.urls import patterns, url, include\n'), ((1100, 1133), 'django.conf.urls.include', 'include', (['"""dnd.mobile.skills.urls"""'], {}), "('dnd.mobile.skills.urls')\n", (1107, 1133), False, 'from django.conf.urls import patterns, url, include\n'), ((1171, 1204), 'django.conf.urls.include', 'include', (['"""dnd.mobile.spells.urls"""'], {}), "('dnd.mobile.spells.urls')\n", (1178, 1204), False, 'from django.conf.urls import patterns, url, include\n'), ((1244, 1278), 'django.conf.urls.include', 'include', (['"""dnd.mobile.deities.urls"""'], {}), "('dnd.mobile.deities.urls')\n", (1251, 1278), False, 'from django.conf.urls import patterns, url, include\n')] |
esteng/guiding-multi-step | ros_aruco.py | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | """
Calibrate with the ROS package aruco_detect
"""
import rospy
import roslib
from geometry_msgs.msg import Transform
class ROSArUcoCalibrate:
    """Thin wrapper that subscribes to the ``aruco_detect`` fiducial
    transform topic and caches the latest message.

    NOTE(review): ``aruco_tag_len`` is accepted but never used — confirm
    whether it should be forwarded to the detector configuration.
    """
    def __init__(self, aruco_tag_len=0.0795):
        print("Please roslaunch roslaunch aruco_detect aruco_detect.launch before you run!")
        self.aruco_tf_topic = "/fiducial_transforms"
        # NOTE(review): aruco_detect usually publishes
        # fiducial_msgs/FiducialTransformArray on this topic, not
        # geometry_msgs/Transform — verify the message type is right.
        self._aruco_tf_info_sub = rospy.Subscriber(self.aruco_tf_topic, Transform, self._tfCb)
        self.aruco_tf = None
    def _tfCb(self, tf_msg):
        # Subscriber callback: warn on None, then cache whatever arrived
        # (a None message is still stored, matching original behavior).
        if tf_msg is None:
            rospy.logwarn("_tfCb: tf_msg is None!")
        self.aruco_tf = tf_msg
    def get_tf(self):
        # Return the most recently received transform; None until the
        # first message arrives.
        aruco_tf = self.aruco_tf
        return aruco_tf
| [((374, 434), 'rospy.Subscriber', 'rospy.Subscriber', (['self.aruco_tf_topic', 'Transform', 'self._tfCb'], {}), '(self.aruco_tf_topic, Transform, self._tfCb)\n', (390, 434), False, 'import rospy\n'), ((533, 572), 'rospy.logwarn', 'rospy.logwarn', (['"""_tfCb: tf_msg is None!"""'], {}), "('_tfCb: tf_msg is None!')\n", (546, 572), False, 'import rospy\n')] |
mmalandra-kb4/service-metrics-gatherer | utils/utils.py | f9a795a43d491ef59a32121ab4ed5c2c62cb968b | """
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import re
import os
import json
from urllib.parse import urlparse
import datetime
logger = logging.getLogger("metricsGatherer.utils")
def remove_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.sub("^.+?:.+?@", "", parsed_url.netloc)
return url.replace(parsed_url.netloc, new_netloc)
def get_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.search("^(.+?):(.+?)@", parsed_url.netloc)
try:
username = new_netloc.group(1).strip()
password = new_netloc.group(2).strip()
return username, password
except: # noqa
return "", ""
def read_json_file(folder, filename, to_json=False):
    """Read fixture from file"""
    path = os.path.join(folder, filename)
    with open(path, "r") as fixture:
        content = fixture.read()
    # Return the raw text, or the parsed object when requested.
    return json.loads(content) if to_json else content
def is_the_time_for_task_starting(allowed_start_time, allowed_end_time):
    """Check whether the current wall-clock time lies inside the window
    ``allowed_start_time``..``allowed_end_time`` (both "HH:MM" strings).

    A window whose start is after its end is treated as wrapping past
    midnight (e.g. 22:00 -> 06:00).
    """
    start_parts = allowed_start_time.split(":")
    end_parts = allowed_end_time.split(":")
    window_start = datetime.time(int(start_parts[0]), int(start_parts[1]))
    window_end = datetime.time(int(end_parts[0]), int(end_parts[1]))
    current = datetime.datetime.now().time()
    if window_start > window_end:
        # Window wraps around midnight: evening leg or morning leg.
        in_evening = window_start <= current <= datetime.time(23, 59)
        in_morning = datetime.time(0, 0) <= current <= window_end
        return in_evening or in_morning
    return window_start <= current <= window_end
def take_the_date_to_check():
    """Return 'now' during the afternoon/evening (12:00-23:59), otherwise
    yesterday — i.e. the most recent date whose afternoon has started."""
    current = datetime.datetime.now()
    afternoon_start = datetime.time(12, 0)
    day_end = datetime.time(23, 59)
    if afternoon_start <= current.time() <= day_end:
        return current
    return current - datetime.timedelta(days=1)
def build_url(main_url, url_params):
    """Build url by joining *main_url* and each element of *url_params*
    with '/' separators."""
    suffix = "/".join(url_params)
    return "%s/%s" % (main_url, suffix)
def unite_project_name(project_id, prefix):
    """Return the project id with *prefix* prepended."""
    return "".join([prefix, project_id])
def parse_conditions(conditions):
    """Parse a '|'-separated condition string into triples.

    Each condition looks like ``metric>=value``; the result is a list of
    ``(metric_name, operator, numeric_threshold)`` tuples.  Conditions
    that are blank, malformed, or have a non-numeric threshold are
    silently dropped.
    """
    # Order matters: two-character operators must be probed before their
    # one-character prefixes.
    operators = (">=", "<=", "==", "=", "<", ">")
    parsed = []
    for raw in conditions.split("|"):
        if not raw.strip():
            continue
        found = ""
        for candidate in operators:
            if candidate in raw:
                found = candidate
                break
        tokens = raw.replace(found, " ").split()
        if len(tokens) != 2:
            continue
        threshold = None
        try:
            threshold = int(tokens[1].strip())
        except ValueError:
            try:
                threshold = float(tokens[1].strip())
            except ValueError:
                pass
        if threshold is not None:
            parsed.append((tokens[0].strip(), found, threshold))
    return parsed
def compare_metrics(cur_metric, metric_threshold, operator):
    """Evaluate ``cur_metric <operator> metric_threshold``.

    Supported operators: >=, >, <=, <, == (and its alias =).
    Any unknown operator yields False.
    """
    if operator in ("==", "="):
        return cur_metric == metric_threshold
    if operator == ">":
        return cur_metric > metric_threshold
    if operator == ">=":
        return cur_metric >= metric_threshold
    if operator == "<":
        return cur_metric < metric_threshold
    if operator == "<=":
        return cur_metric <= metric_threshold
    return False
def convert_metrics_to_string(cur_metrics):
    """Serialize (name, value) metric pairs as 'name:value;name:value'."""
    rendered = []
    for metric in cur_metrics:
        rendered.append("%s:%s" % (metric[0], metric[1]))
    return ";".join(rendered)
| [((688, 730), 'logging.getLogger', 'logging.getLogger', (['"""metricsGatherer.utils"""'], {}), "('metricsGatherer.utils')\n", (705, 730), False, 'import logging\n'), ((788, 801), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (796, 801), False, 'from urllib.parse import urlparse\n'), ((819, 861), 're.sub', 're.sub', (['"""^.+?:.+?@"""', '""""""', 'parsed_url.netloc'], {}), "('^.+?:.+?@', '', parsed_url.netloc)\n", (825, 861), False, 'import re\n'), ((970, 983), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (978, 983), False, 'from urllib.parse import urlparse\n'), ((1001, 1046), 're.search', 're.search', (['"""^(.+?):(.+?)@"""', 'parsed_url.netloc'], {}), "('^(.+?):(.+?)@', parsed_url.netloc)\n", (1010, 1046), False, 'import re\n'), ((2153, 2176), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2174, 2176), False, 'import datetime\n'), ((2188, 2211), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2209, 2211), False, 'import datetime\n'), ((2214, 2240), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2232, 2240), False, 'import datetime\n'), ((1327, 1357), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (1339, 1357), False, 'import os\n'), ((1736, 1759), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1757, 1759), False, 'import datetime\n'), ((2026, 2049), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2047, 2049), False, 'import datetime\n'), ((2077, 2097), 'datetime.time', 'datetime.time', (['(12)', '(0)'], {}), '(12, 0)\n', (2090, 2097), False, 'import datetime\n'), ((2114, 2135), 'datetime.time', 'datetime.time', (['(23)', '(59)'], {}), '(23, 59)\n', (2127, 2135), False, 'import datetime\n'), ((1837, 1858), 'datetime.time', 'datetime.time', (['(23)', '(59)'], {}), '(23, 59)\n', (1850, 1858), False, 'import datetime\n'), ((1889, 1908), 'datetime.time', 'datetime.time', 
(['(0)', '(0)'], {}), '(0, 0)\n', (1902, 1908), False, 'import datetime\n')] |
nassermarafi/SRCSWArchetypes | OSAnalysisHelper.py | 105a5e40ef0ba1951108dc52b382ae0c5457057a | from __future__ import absolute_import
__author__ = 'marafi'
def SolutionAlgorithim(OData, Dt, Tol, Steps):
    """Append a cascade of OpenSees fallback solution strategies to *OData*.

    Each emitted TCL block re-runs the analysis (Steps sub-steps of size
    Dt) with a different algorithm and only executes if the previous
    attempt left ``$ok`` non-zero.  Order tried: NewtonLineSearch,
    Newton with initial tangent, Broyden, KrylovNewton.
    """
    #Insert within the While loop, make sure parameter "ok" is defined
    import OpenSeesAPI
    # Attempt 1: Newton with line search, energy-increment test.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Attempt 2: Newton using the initial stiffness, displacement test.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Attempt 3: Broyden quasi-Newton with 8 stored vectors.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Attempt 4: Krylov-accelerated Newton.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimV2(OData, Dt, Tol, Steps):
    """Append fallback solution strategies (variant 2) to *OData*.

    Same guarded-cascade pattern as SolutionAlgorithim, but tries
    KrylovNewton first, then NewtonLineSearch with its default, Bisection,
    Secant and RegulaFalsi line-search flavors.
    """
    #Insert within the While loop, make sure parameter "ok" is defined
    import OpenSeesAPI
    # Attempt 1: Krylov-accelerated Newton, subspace dimension 6.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = 6))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Attempt 2: Newton with line search (displacement-increment test).
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Attempts 3-5: line-search root-finding variants.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Bisection... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Secant... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch RegulaFalsi... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimKrylovOnly(OData, Dt, Tol, Steps, MaxDim = 6):
    """Append a single KrylovNewton retry block to *OData*.

    Emits TCL that, when ``$ok`` is non-zero, reruns the analysis with a
    Krylov-accelerated Newton algorithm (subspace dimension *MaxDim*)
    and a displacement-increment convergence test.
    """
    #Insert within the While loop, make sure parameter "ok" is defined
    import OpenSeesAPI
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %e and Tol: %e ... "'%(Dt,Tol)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 1000, 2))
    # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = MaxDim))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %e ]'%(Steps,Dt)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SenSolutionAlgorithim(OData, Dt, Steps, Tol = 1e-12, KrylovMaxDim = 12, MinDt = 1e-12, NoOfIterations=3000):
    """Append a self-adapting time-history driver (TCL while-loop) to *OData*.

    The emitted script walks ``n_steps`` steps of size ``dt``; on
    non-convergence it first retries with KrylovNewton, then repeatedly
    subdivides the step by ``div`` (down to ``min_dt``, exiting if even
    that fails) and re-grows it once sub-steps converge again.
    """
    import OpenSeesAPI
    # Script-level constants.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set conv_tol %e'%Tol))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set max_iter %d;'%NoOfIterations))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 3000, 0))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set dt %e;'%Dt))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set min_dt %e;'%MinDt))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set n_steps %d;'%Steps))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set cur_step 1;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set div 10.0;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set tol 1.0e-12;'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;'))
    # Main stepping loop: plain Newton first.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('while {$cur_step < $n_steps} {'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 0))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('    test EnergyIncr $conv_tol $max_iter;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    algorithm Newton;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    set ok [analyze 1 $dt];'))
    # First fallback: KrylovNewton at the full step size.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        set dt_temp [expr $dt];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        puts "> analysis failed to converge at step $cur_step";'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        puts "> trying KrylovNewton";'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        algorithm KrylovNewton -maxDim %d;'%KrylovMaxDim))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        set ok [analyze 1 $dt];'))
    # Second fallback: adaptive step subdivision until the full dt is covered.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            set t 0.0;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            set mini_t 0.0;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            set dt_temp [expr round($dt/$div/$tol)*$tol];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            set mini_dt_temp 0.0;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            while {$t < $dt} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                if {$dt_temp < $min_dt} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    puts "<< model did not converge (reason: time step less than $min_dt)";'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    puts "<< exiting safely";'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    wipe;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    exit;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                };'))
    # Relax the tolerance once the step has been cut twice.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                if {$dt_temp < [expr $dt/pow($div, 2)]} {'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol*10, NoOfIterations, 0))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    test EnergyIncr [expr $conv_tol*10.0] $max_iter;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                set ok [analyze 1 $dt_temp];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                if {$ok == 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    set t [expr round(($t + $dt_temp)/$tol)*$tol];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    set mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    if {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                } else {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    set mini_t 0.0;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    set mini_dt_temp [expr round($dt_temp/$tol)*$tol];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                    set dt_temp [expr round($dt_temp/$div/$tol)*$tol];'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('                };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('            };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    if {$cur_step % 1 == 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('        puts "Running Tim History Step: $cur_step out of %d (Sen Algo.)";'%Steps))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    };'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('    incr cur_step;'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('};'))
def PushOverSolutionAlgorithim(OData, StepSize, Tol, ControlNode):
    """Append fallback strategies for a displacement-controlled pushover.

    Each guarded TCL block re-runs a single analysis step under
    displacement control of *ControlNode* with a different algorithm
    (KrylovNewton, then NewtonLineSearch variants), all using the
    energy-increment convergence test.
    """
    #Insert within the While loop, make sure parameter "ok" is defined
    import OpenSeesAPI
    # Reduce the displacement increment before switching algorithms.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
    OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Disabled fallbacks kept for reference (initial-tangent Newton, Broyden).
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
    # OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
    # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    #
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
    # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000):
    """Append a displacement-controlled retry ladder to the analysis script.

    Insert within the while loop of a pushover run; the TCL variable ``ok``
    must already be defined.  Whenever the previous attempt failed
    (``$ok != 0``) the emitted script first shrinks the displacement
    increment, then retries a single step with progressively more robust
    algorithms (KrylovNewton, then NewtonLineSearch variants), each under a
    norm-displacement-increment convergence test.

    :param OData: script container collecting OpenSeesAPI objects.
    :param StepSize: displacement increment for the DisplacementControl integrator.
    :param Tol: tolerance for the NormDispIncr convergence test.
    :param ControlNode: node whose DOF 1 displacement is controlled.
    :param NoOfIterations: max iterations per convergence test
        (default 1000, matching the previously hard-coded value).
    """
    import OpenSeesAPI
    # Step 1: if the last step failed, switch to the smaller displacement increment.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
    OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Step 2: retry with KrylovNewton.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Step 3: retry with Newton line search (default Armijo-type search).
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Step 4: retry with Newton line search, Bisection variant.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Step 5: retry with Newton line search, Secant variant.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Step 6: last resort — Newton line search, RegulaFalsi variant.
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
    OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithm(OData, StepSize, Tol, ControlNode, Iter=1000):
    """Append a single KrylovNewton retry block with a reduced step size.

    Insert within the while loop of a pushover run; the TCL variable ``ok``
    must already be defined.  The emitted script announces the smaller step
    when the previous attempt failed, always switches the integrator to the
    new displacement increment, and retries one step with KrylovNewton under
    an energy-increment convergence test.

    :param OData: script container collecting OpenSeesAPI objects.
    :param StepSize: displacement increment for the DisplacementControl integrator.
    :param Tol: tolerance for the EnergyIncr convergence test.
    :param ControlNode: node whose DOF 1 displacement is controlled.
    :param Iter: max iterations for the convergence test (default 1000).
    """
    import OpenSeesAPI
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
    # Integrator update is deliberately outside the TCL if-block: the new
    # (smaller) increment applies to all subsequent steps.
    OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
    # Bug fix: previously hard-coded 1000 here, silently ignoring the Iter parameter.
    OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
    OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithmDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000):
    """Append a KrylovNewton retry block using a NormDispIncr test.

    Insert within the while loop of a pushover run; the TCL variable ``ok``
    must already be defined.  On failure the emitted script reports the
    reduced step size, then (unconditionally) switches the integrator to the
    new displacement increment and retries one step with KrylovNewton.

    :param OData: script container collecting OpenSeesAPI objects.
    :param StepSize: displacement increment for the DisplacementControl integrator.
    :param Tol: tolerance for the NormDispIncr convergence test.
    :param ControlNode: node whose DOF 1 displacement is controlled.
    :param NoOfIterations: max iterations for the convergence test.
    """
    import OpenSeesAPI
    add = OData.AddObject
    tcl = OpenSeesAPI.TCL.TCLScript
    # Announce the smaller step only when the previous attempt failed.
    add(tcl('if {$ok != 0} {'))
    add(tcl('puts "Trying Smaller Step: %f and Tol: %f ... "' % (StepSize, Tol)))
    add(tcl('}'))
    # The integrator change itself applies regardless of the previous outcome.
    add(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
    # Conditional single-step retry with KrylovNewton.
    add(tcl('if {$ok != 0} {'))
    add(tcl('puts "Trying KrylovNewton ... "'))
    add(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 2))
    add(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
    add(tcl('set ok [analyze 1]'))
    add(tcl('}'))
def PushOverSolutionAlgorithimConstantTol(OData, Tol, Iter=1000):
    """Append a ladder of algorithm fallbacks at a constant tolerance.

    Insert within the while loop of a pushover run; the TCL variable ``ok``
    must already be defined.  Each emitted block retries a single analysis
    step with a different solution algorithm whenever the previous attempt
    failed (``$ok != 0``), always under the same EnergyIncr convergence test.

    :param OData: script container collecting OpenSeesAPI objects.
    :param Tol: tolerance for the EnergyIncr convergence test.
    :param Iter: max iterations for the convergence test (default 1000).
    """
    import OpenSeesAPI
    add = OData.AddObject
    tcl = OpenSeesAPI.TCL.TCLScript
    alg = OpenSeesAPI.Analysis.Algorithm
    # (status message, algorithm factory) pairs, in escalation order.
    # Factories keep object construction order identical to a straight-line
    # sequence of AddObject calls.
    fallbacks = [
        ('puts "Trying KrylovNewton ... "',
         lambda: alg.KrylovNewton()),
        ('puts "Trying Newton Line Search ... "',
         lambda: alg.NewtonLineSearch(Tolerance=0.8)),
        ('puts "Trying Newton Line Search BiSection ... "',
         lambda: alg.NewtonLineSearch('Bisection')),
        ('puts "Trying Newton Line Search Secant... "',
         lambda: alg.NewtonLineSearch('Secant')),
        ('puts "Trying Newton Line Search RegulaFalsi ... "',
         lambda: alg.NewtonLineSearch('RegulaFalsi')),
    ]
    for message, make_algorithm in fallbacks:
        add(tcl('if {$ok != 0} {'))
        add(tcl(message))
        add(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol, Iter, 0))
        add(make_algorithm())
        add(tcl('set ok [analyze 1]'))
        add(tcl('}'))
"\')\n', (864, 914), False, 'import OpenSeesAPI\n'), ((936, 988), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (974, 988), False, 'import OpenSeesAPI\n'), ((1008, 1059), 'OpenSeesAPI.Analysis.Algorithm.Newton', 'OpenSeesAPI.Analysis.Algorithm.Newton', ([], {'Initial': '(True)'}), '(Initial=True)\n', (1045, 1059), False, 'import OpenSeesAPI\n'), ((1081, 1147), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (1106, 1147), False, 'import OpenSeesAPI\n'), ((1166, 1196), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (1191, 1196), False, 'import OpenSeesAPI\n'), ((1219, 1263), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (1244, 1263), False, 'import OpenSeesAPI\n'), ((1285, 1340), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Broyden ... \\""""'], {}), '(\'puts "Trying Broyden ... 
"\')\n', (1310, 1340), False, 'import OpenSeesAPI\n'), ((1362, 1412), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (1398, 1412), False, 'import OpenSeesAPI\n'), ((1432, 1473), 'OpenSeesAPI.Analysis.Algorithm.Broyden', 'OpenSeesAPI.Analysis.Algorithm.Broyden', (['(8)'], {}), '(8)\n', (1470, 1473), False, 'import OpenSeesAPI\n'), ((1495, 1561), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (1520, 1561), False, 'import OpenSeesAPI\n'), ((1580, 1610), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (1605, 1610), False, 'import OpenSeesAPI\n'), ((1633, 1677), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (1658, 1677), False, 'import OpenSeesAPI\n'), ((1699, 1759), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... \\""""'], {}), '(\'puts "Trying KrylovNewton ... 
"\')\n', (1724, 1759), False, 'import OpenSeesAPI\n'), ((1781, 1831), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (1817, 1831), False, 'import OpenSeesAPI\n'), ((1851, 1896), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (1894, 1896), False, 'import OpenSeesAPI\n'), ((1918, 1984), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (1943, 1984), False, 'import OpenSeesAPI\n'), ((2003, 2033), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (2028, 2033), False, 'import OpenSeesAPI\n'), ((2199, 2243), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (2224, 2243), False, 'import OpenSeesAPI\n'), ((2265, 2354), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Lower Dt: %f and Tol: %f ... "\' % (Dt, Tol))'], {}), '(\'puts "Trying Lower Dt: %f and Tol: %f ... "\' % (\n Dt, Tol))\n', (2290, 2354), False, 'import OpenSeesAPI\n'), ((2368, 2421), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Krylov... \\""""'], {}), '(\'puts "Trying Krylov... 
"\')\n', (2393, 2421), False, 'import OpenSeesAPI\n'), ((2443, 2493), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (2479, 2493), False, 'import OpenSeesAPI\n'), ((2513, 2566), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {'MaxDim': '(6)'}), '(MaxDim=6)\n', (2556, 2566), False, 'import OpenSeesAPI\n'), ((2590, 2656), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (2615, 2656), False, 'import OpenSeesAPI\n'), ((2675, 2705), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (2700, 2705), False, 'import OpenSeesAPI\n'), ((2728, 2772), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (2753, 2772), False, 'import OpenSeesAPI\n'), ((2794, 2857), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying NewtonLineSearch... \\""""'], {}), '(\'puts "Trying NewtonLineSearch... 
"\')\n', (2819, 2857), False, 'import OpenSeesAPI\n'), ((2879, 2931), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (2917, 2931), False, 'import OpenSeesAPI\n'), ((2951, 3013), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', ([], {'Tolerance': '(0.8)'}), '(Tolerance=0.8)\n', (2998, 3013), False, 'import OpenSeesAPI\n'), ((3035, 3101), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (3060, 3101), False, 'import OpenSeesAPI\n'), ((3120, 3150), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (3145, 3150), False, 'import OpenSeesAPI\n'), ((3173, 3217), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (3198, 3217), False, 'import OpenSeesAPI\n'), ((3239, 3312), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying NewtonLineSearch Bisection... \\""""'], {}), '(\'puts "Trying NewtonLineSearch Bisection... 
"\')\n', (3264, 3312), False, 'import OpenSeesAPI\n'), ((3334, 3384), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (3370, 3384), False, 'import OpenSeesAPI\n'), ((3404, 3464), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Bisection"""'], {}), "('Bisection')\n", (3451, 3464), False, 'import OpenSeesAPI\n'), ((3486, 3552), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (3511, 3552), False, 'import OpenSeesAPI\n'), ((3571, 3601), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (3596, 3601), False, 'import OpenSeesAPI\n'), ((3624, 3668), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (3649, 3668), False, 'import OpenSeesAPI\n'), ((3690, 3760), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying NewtonLineSearch Secant... \\""""'], {}), '(\'puts "Trying NewtonLineSearch Secant... 
"\')\n', (3715, 3760), False, 'import OpenSeesAPI\n'), ((3782, 3832), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (3818, 3832), False, 'import OpenSeesAPI\n'), ((3852, 3909), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Secant"""'], {}), "('Secant')\n", (3899, 3909), False, 'import OpenSeesAPI\n'), ((3931, 3997), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (3956, 3997), False, 'import OpenSeesAPI\n'), ((4016, 4046), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (4041, 4046), False, 'import OpenSeesAPI\n'), ((4069, 4113), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (4094, 4113), False, 'import OpenSeesAPI\n'), ((4135, 4210), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying NewtonLineSearch RegulaFalsi... \\""""'], {}), '(\'puts "Trying NewtonLineSearch RegulaFalsi... 
"\')\n', (4160, 4210), False, 'import OpenSeesAPI\n'), ((4232, 4282), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (4268, 4282), False, 'import OpenSeesAPI\n'), ((4302, 4364), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""RegulaFalsi"""'], {}), "('RegulaFalsi')\n", (4349, 4364), False, 'import OpenSeesAPI\n'), ((4386, 4452), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %f ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %f ]' % (Steps, Dt))\n", (4411, 4452), False, 'import OpenSeesAPI\n'), ((4471, 4501), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (4496, 4501), False, 'import OpenSeesAPI\n'), ((4687, 4731), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (4712, 4731), False, 'import OpenSeesAPI\n'), ((4753, 4842), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Lower Dt: %e and Tol: %e ... "\' % (Dt, Tol))'], {}), '(\'puts "Trying Lower Dt: %e and Tol: %e ... "\' % (\n Dt, Tol))\n', (4778, 4842), False, 'import OpenSeesAPI\n'), ((4856, 4909), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Krylov... \\""""'], {}), '(\'puts "Trying Krylov... 
"\')\n', (4881, 4909), False, 'import OpenSeesAPI\n'), ((4931, 4983), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(2)'], {}), '(Tol, 1000, 2)\n', (4969, 4983), False, 'import OpenSeesAPI\n'), ((5077, 5135), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {'MaxDim': 'MaxDim'}), '(MaxDim=MaxDim)\n', (5120, 5135), False, 'import OpenSeesAPI\n'), ((5159, 5225), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set ok [analyze %d %e ]' % (Steps, Dt))"], {}), "('set ok [analyze %d %e ]' % (Steps, Dt))\n", (5184, 5225), False, 'import OpenSeesAPI\n'), ((5244, 5274), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (5269, 5274), False, 'import OpenSeesAPI\n'), ((5433, 5483), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set conv_tol %e' % Tol)"], {}), "('set conv_tol %e' % Tol)\n", (5458, 5483), False, 'import OpenSeesAPI\n'), ((5503, 5565), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set max_iter %d;' % NoOfIterations)"], {}), "('set max_iter %d;' % NoOfIterations)\n", (5528, 5565), False, 'import OpenSeesAPI\n'), ((5585, 5637), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(3000)', '(0)'], {}), '(Tol, 3000, 0)\n', (5623, 5637), False, 'import OpenSeesAPI\n'), ((5971, 6015), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set dt %e;' % Dt)"], {}), "('set dt %e;' % Dt)\n", (5996, 6015), False, 'import OpenSeesAPI\n'), ((6035, 6086), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set min_dt %e;' % MinDt)"], {}), "('set min_dt %e;' % MinDt)\n", (6060, 6086), False, 'import OpenSeesAPI\n'), ((6106, 6158), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('set n_steps %d;' % Steps)"], {}), "('set n_steps %d;' % Steps)\n", (6131, 6158), False, 'import OpenSeesAPI\n'), ((6178, 6222), 
'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set cur_step 1;"""'], {}), "('set cur_step 1;')\n", (6203, 6222), False, 'import OpenSeesAPI\n'), ((6244, 6286), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set div 10.0;"""'], {}), "('set div 10.0;')\n", (6269, 6286), False, 'import OpenSeesAPI\n'), ((6308, 6353), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set tol 1.0e-12;"""'], {}), "('set tol 1.0e-12;')\n", (6333, 6353), False, 'import OpenSeesAPI\n'), ((6524, 6583), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""while {$cur_step < $n_steps} {"""'], {}), "('while {$cur_step < $n_steps} {')\n", (6549, 6583), False, 'import OpenSeesAPI\n'), ((6605, 6667), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', 'NoOfIterations', '(0)'], {}), '(Tol, NoOfIterations, 0)\n', (6643, 6667), False, 'import OpenSeesAPI\n'), ((6779, 6827), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\talgorithm Newton;"""'], {}), "('\\talgorithm Newton;')\n", (6804, 6827), False, 'import OpenSeesAPI\n'), ((6848, 6902), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\tset ok [analyze 1 $dt];"""'], {}), "('\\tset ok [analyze 1 $dt];')\n", (6873, 6902), False, 'import OpenSeesAPI\n'), ((6923, 6969), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\tif {$ok != 0} {"""'], {}), "('\\tif {$ok != 0} {')\n", (6948, 6969), False, 'import OpenSeesAPI\n'), ((6990, 7046), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\tset dt_temp [expr $dt];"""'], {}), "('\\t\\tset dt_temp [expr $dt];')\n", (7015, 7046), False, 'import OpenSeesAPI\n'), ((7066, 7159), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\tputs "> analysis failed to converge at step $cur_step";"""'], {}), '(\n \'\\t\\tputs "> analysis failed to converge at step $cur_step";\')\n', (7091, 7159), False, 'import OpenSeesAPI\n'), ((7174, 7236), 
'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\tputs "> trying KrylovNewton";"""'], {}), '(\'\\t\\tputs "> trying KrylovNewton";\')\n', (7199, 7236), False, 'import OpenSeesAPI\n'), ((7256, 7342), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (["('\\t\\talgorithm KrylovNewton -maxDim %d;' % KrylovMaxDim)"], {}), "('\\t\\talgorithm KrylovNewton -maxDim %d;' %\n KrylovMaxDim)\n", (7281, 7342), False, 'import OpenSeesAPI\n'), ((7356, 7412), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\tset ok [analyze 1 $dt];"""'], {}), "('\\t\\tset ok [analyze 1 $dt];')\n", (7381, 7412), False, 'import OpenSeesAPI\n'), ((7432, 7480), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\tif {$ok != 0} {"""'], {}), "('\\t\\tif {$ok != 0} {')\n", (7457, 7480), False, 'import OpenSeesAPI\n'), ((7500, 7545), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\tset t 0.0;"""'], {}), "('\\t\\t\\tset t 0.0;')\n", (7525, 7545), False, 'import OpenSeesAPI\n'), ((7564, 7614), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\tset mini_t 0.0;"""'], {}), "('\\t\\t\\tset mini_t 0.0;')\n", (7589, 7614), False, 'import OpenSeesAPI\n'), ((7633, 7718), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\tset dt_temp [expr round($dt/$div/$tol)*$tol];"""'], {}), "('\\t\\t\\tset dt_temp [expr round($dt/$div/$tol)*$tol];'\n )\n", (7658, 7718), False, 'import OpenSeesAPI\n'), ((7732, 7788), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\tset mini_dt_temp 0.0;"""'], {}), "('\\t\\t\\tset mini_dt_temp 0.0;')\n", (7757, 7788), False, 'import OpenSeesAPI\n'), ((7807, 7860), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\twhile {$t < $dt} {"""'], {}), "('\\t\\t\\twhile {$t < $dt} {')\n", (7832, 7860), False, 'import OpenSeesAPI\n'), ((7879, 7941), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\tif {$dt_temp < $min_dt} {"""'], 
{}), "('\\t\\t\\t\\tif {$dt_temp < $min_dt} {')\n", (7904, 7941), False, 'import OpenSeesAPI\n'), ((7959, 8079), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tputs "<< model did not converge (reason: time step less than $min_dt)";"""'], {}), '(\n \'\\t\\t\\t\\t\\tputs "<< model did not converge (reason: time step less than $min_dt)";\'\n )\n', (7984, 8079), False, 'import OpenSeesAPI\n'), ((8086, 8150), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tputs "<< exiting safely";"""'], {}), '(\'\\t\\t\\t\\t\\tputs "<< exiting safely";\')\n', (8111, 8150), False, 'import OpenSeesAPI\n'), ((8167, 8211), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\twipe;"""'], {}), "('\\t\\t\\t\\t\\twipe;')\n", (8192, 8211), False, 'import OpenSeesAPI\n'), ((8228, 8272), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\texit;"""'], {}), "('\\t\\t\\t\\t\\texit;')\n", (8253, 8272), False, 'import OpenSeesAPI\n'), ((8289, 8328), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t};"""'], {}), "('\\t\\t\\t\\t};')\n", (8314, 8328), False, 'import OpenSeesAPI\n'), ((8346, 8424), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\tif {$dt_temp < [expr $dt/pow($div, 2)]} {"""'], {}), "('\\t\\t\\t\\tif {$dt_temp < [expr $dt/pow($div, 2)]} {')\n", (8371, 8424), False, 'import OpenSeesAPI\n'), ((8442, 8509), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['(Tol * 10)', 'NoOfIterations', '(0)'], {}), '(Tol * 10, NoOfIterations, 0)\n', (8480, 8509), False, 'import OpenSeesAPI\n'), ((8635, 8674), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t};"""'], {}), "('\\t\\t\\t\\t};')\n", (8660, 8674), False, 'import OpenSeesAPI\n'), ((8692, 8757), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\tset ok [analyze 1 $dt_temp];"""'], {}), "('\\t\\t\\t\\tset ok [analyze 1 
$dt_temp];')\n", (8717, 8757), False, 'import OpenSeesAPI\n'), ((8775, 8827), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\tif {$ok == 0} {"""'], {}), "('\\t\\t\\t\\tif {$ok == 0} {')\n", (8800, 8827), False, 'import OpenSeesAPI\n'), ((8845, 8935), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tset t [expr round(($t + $dt_temp)/$tol)*$tol];"""'], {}), "(\n '\\t\\t\\t\\t\\tset t [expr round(($t + $dt_temp)/$tol)*$tol];')\n", (8870, 8935), False, 'import OpenSeesAPI\n'), ((8947, 9047), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tset mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];"""'], {}), "(\n '\\t\\t\\t\\t\\tset mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];')\n", (8972, 9047), False, 'import OpenSeesAPI\n'), ((9059, 9190), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tif {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};"""'], {}), "(\n '\\t\\t\\t\\t\\tif {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};'\n )\n", (9084, 9190), False, 'import OpenSeesAPI\n'), ((9197, 9242), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t} else {"""'], {}), "('\\t\\t\\t\\t} else {')\n", (9222, 9242), False, 'import OpenSeesAPI\n'), ((9260, 9314), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tset mini_t 0.0;"""'], {}), "('\\t\\t\\t\\t\\tset mini_t 0.0;')\n", (9285, 9314), False, 'import OpenSeesAPI\n'), ((9331, 9425), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tset mini_dt_temp [expr round($dt_temp/$tol)*$tol];"""'], {}), "(\n '\\t\\t\\t\\t\\tset mini_dt_temp [expr round($dt_temp/$tol)*$tol];')\n", (9356, 9425), False, 'import OpenSeesAPI\n'), ((9437, 9531), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t\tset dt_temp [expr round($dt_temp/$div/$tol)*$tol];"""'], {}), "(\n '\\t\\t\\t\\t\\tset dt_temp 
[expr round($dt_temp/$div/$tol)*$tol];')\n", (9462, 9531), False, 'import OpenSeesAPI\n'), ((9543, 9582), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t\t};"""'], {}), "('\\t\\t\\t\\t};')\n", (9568, 9582), False, 'import OpenSeesAPI\n'), ((9600, 9637), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t\t};"""'], {}), "('\\t\\t\\t};')\n", (9625, 9637), False, 'import OpenSeesAPI\n'), ((9656, 9691), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t\t};"""'], {}), "('\\t\\t};')\n", (9681, 9691), False, 'import OpenSeesAPI\n'), ((9711, 9744), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t};"""'], {}), "('\\t};')\n", (9736, 9744), False, 'import OpenSeesAPI\n'), ((9765, 9821), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\tif {$cur_step % 1 == 0} {"""'], {}), "('\\tif {$cur_step % 1 == 0} {')\n", (9790, 9821), False, 'import OpenSeesAPI\n'), ((9842, 9957), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'\\t\\tputs "Running Tim History Step: $cur_step out of %d (Sen Algo.)";\' %\n Steps)'], {}), '(\n \'\\t\\tputs "Running Tim History Step: $cur_step out of %d (Sen Algo.)";\' %\n Steps)\n', (9867, 9957), False, 'import OpenSeesAPI\n'), ((9966, 9999), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\t};"""'], {}), "('\\t};')\n", (9991, 9999), False, 'import OpenSeesAPI\n'), ((10020, 10065), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""\tincr cur_step;"""'], {}), "('\\tincr cur_step;')\n", (10045, 10065), False, 'import OpenSeesAPI\n'), ((10086, 10117), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""};"""'], {}), "('};')\n", (10111, 10117), False, 'import OpenSeesAPI\n'), ((10302, 10346), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (10327, 10346), False, 'import OpenSeesAPI\n'), ((10368, 10466), 'OpenSeesAPI.TCL.TCLScript', 
'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' % (StepSize, Tol))'], {}), '(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' %\n (StepSize, Tol))\n', (10393, 10466), False, 'import OpenSeesAPI\n'), ((10482, 10570), 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', (['ControlNode', '(1)', 'StepSize'], {}), '(ControlNode, 1,\n StepSize)\n', (10540, 10570), False, 'import OpenSeesAPI\n'), ((10589, 10619), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (10614, 10619), False, 'import OpenSeesAPI\n'), ((10642, 10686), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (10667, 10686), False, 'import OpenSeesAPI\n'), ((10708, 10768), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... \\""""'], {}), '(\'puts "Trying KrylovNewton ... "\')\n', (10733, 10768), False, 'import OpenSeesAPI\n'), ((10790, 10840), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (10826, 10840), False, 'import OpenSeesAPI\n'), ((10860, 10905), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (10903, 10905), False, 'import OpenSeesAPI\n'), ((10927, 10974), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (10952, 10974), False, 'import OpenSeesAPI\n'), ((10996, 11026), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (11021, 11026), False, 'import OpenSeesAPI\n'), ((11049, 11093), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (11074, 11093), False, 'import OpenSeesAPI\n'), ((11115, 11181), 'OpenSeesAPI.TCL.TCLScript', 
'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search ... \\""""'], {}), '(\'puts "Trying Newton Line Search ... "\')\n', (11140, 11181), False, 'import OpenSeesAPI\n'), ((11203, 11253), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (11239, 11253), False, 'import OpenSeesAPI\n'), ((11273, 11335), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', ([], {'Tolerance': '(0.8)'}), '(Tolerance=0.8)\n', (11320, 11335), False, 'import OpenSeesAPI\n'), ((11357, 11404), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (11382, 11404), False, 'import OpenSeesAPI\n'), ((11426, 11456), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (11451, 11456), False, 'import OpenSeesAPI\n'), ((12336, 12380), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (12361, 12380), False, 'import OpenSeesAPI\n'), ((12402, 12478), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search BiSection ... \\""""'], {}), '(\'puts "Trying Newton Line Search BiSection ... 
"\')\n', (12427, 12478), False, 'import OpenSeesAPI\n'), ((12500, 12550), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (12536, 12550), False, 'import OpenSeesAPI\n'), ((12570, 12630), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Bisection"""'], {}), "('Bisection')\n", (12617, 12630), False, 'import OpenSeesAPI\n'), ((12652, 12699), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (12677, 12699), False, 'import OpenSeesAPI\n'), ((12721, 12751), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (12746, 12751), False, 'import OpenSeesAPI\n'), ((12774, 12818), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (12799, 12818), False, 'import OpenSeesAPI\n'), ((12840, 12912), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search Secant... \\""""'], {}), '(\'puts "Trying Newton Line Search Secant... 
"\')\n', (12865, 12912), False, 'import OpenSeesAPI\n'), ((12934, 12984), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (12970, 12984), False, 'import OpenSeesAPI\n'), ((13004, 13061), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Secant"""'], {}), "('Secant')\n", (13051, 13061), False, 'import OpenSeesAPI\n'), ((13083, 13130), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (13108, 13130), False, 'import OpenSeesAPI\n'), ((13152, 13182), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (13177, 13182), False, 'import OpenSeesAPI\n'), ((13205, 13249), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (13230, 13249), False, 'import OpenSeesAPI\n'), ((13271, 13349), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search RegulaFalsi ... \\""""'], {}), '(\'puts "Trying Newton Line Search RegulaFalsi ... 
"\')\n', (13296, 13349), False, 'import OpenSeesAPI\n'), ((13371, 13421), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (13407, 13421), False, 'import OpenSeesAPI\n'), ((13441, 13503), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""RegulaFalsi"""'], {}), "('RegulaFalsi')\n", (13488, 13503), False, 'import OpenSeesAPI\n'), ((13525, 13572), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (13550, 13572), False, 'import OpenSeesAPI\n'), ((13594, 13624), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (13619, 13624), False, 'import OpenSeesAPI\n'), ((13816, 13860), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (13841, 13860), False, 'import OpenSeesAPI\n'), ((13882, 13980), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' % (StepSize, Tol))'], {}), '(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' %\n (StepSize, Tol))\n', (13907, 13980), False, 'import OpenSeesAPI\n'), ((13996, 14084), 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', (['ControlNode', '(1)', 'StepSize'], {}), '(ControlNode, 1,\n StepSize)\n', (14054, 14084), False, 'import OpenSeesAPI\n'), ((14103, 14133), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (14128, 14133), False, 'import OpenSeesAPI\n'), ((14156, 14200), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (14181, 14200), False, 'import OpenSeesAPI\n'), ((14222, 14282), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... 
\\""""'], {}), '(\'puts "Trying KrylovNewton ... "\')\n', (14247, 14282), False, 'import OpenSeesAPI\n'), ((14304, 14356), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (14342, 14356), False, 'import OpenSeesAPI\n'), ((14376, 14421), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (14419, 14421), False, 'import OpenSeesAPI\n'), ((14443, 14490), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (14468, 14490), False, 'import OpenSeesAPI\n'), ((14512, 14542), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (14537, 14542), False, 'import OpenSeesAPI\n'), ((14565, 14609), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (14590, 14609), False, 'import OpenSeesAPI\n'), ((14631, 14697), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search ... \\""""'], {}), '(\'puts "Trying Newton Line Search ... 
"\')\n', (14656, 14697), False, 'import OpenSeesAPI\n'), ((14719, 14771), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (14757, 14771), False, 'import OpenSeesAPI\n'), ((14791, 14853), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', ([], {'Tolerance': '(0.8)'}), '(Tolerance=0.8)\n', (14838, 14853), False, 'import OpenSeesAPI\n'), ((14875, 14922), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (14900, 14922), False, 'import OpenSeesAPI\n'), ((14944, 14974), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (14969, 14974), False, 'import OpenSeesAPI\n'), ((14997, 15041), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (15022, 15041), False, 'import OpenSeesAPI\n'), ((15063, 15139), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search BiSection ... \\""""'], {}), '(\'puts "Trying Newton Line Search BiSection ... 
"\')\n', (15088, 15139), False, 'import OpenSeesAPI\n'), ((15161, 15213), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (15199, 15213), False, 'import OpenSeesAPI\n'), ((15233, 15293), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Bisection"""'], {}), "('Bisection')\n", (15280, 15293), False, 'import OpenSeesAPI\n'), ((15315, 15362), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (15340, 15362), False, 'import OpenSeesAPI\n'), ((15384, 15414), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (15409, 15414), False, 'import OpenSeesAPI\n'), ((15437, 15481), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (15462, 15481), False, 'import OpenSeesAPI\n'), ((15503, 15575), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search Secant... \\""""'], {}), '(\'puts "Trying Newton Line Search Secant... 
"\')\n', (15528, 15575), False, 'import OpenSeesAPI\n'), ((15597, 15649), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (15635, 15649), False, 'import OpenSeesAPI\n'), ((15669, 15726), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Secant"""'], {}), "('Secant')\n", (15716, 15726), False, 'import OpenSeesAPI\n'), ((15748, 15795), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (15773, 15795), False, 'import OpenSeesAPI\n'), ((15817, 15847), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (15842, 15847), False, 'import OpenSeesAPI\n'), ((15870, 15914), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (15895, 15914), False, 'import OpenSeesAPI\n'), ((15936, 16014), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search RegulaFalsi ... \\""""'], {}), '(\'puts "Trying Newton Line Search RegulaFalsi ... 
"\')\n', (15961, 16014), False, 'import OpenSeesAPI\n'), ((16036, 16088), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (16074, 16088), False, 'import OpenSeesAPI\n'), ((16108, 16170), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""RegulaFalsi"""'], {}), "('RegulaFalsi')\n", (16155, 16170), False, 'import OpenSeesAPI\n'), ((16192, 16239), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (16217, 16239), False, 'import OpenSeesAPI\n'), ((16261, 16291), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (16286, 16291), False, 'import OpenSeesAPI\n'), ((16432, 16476), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (16457, 16476), False, 'import OpenSeesAPI\n'), ((16498, 16596), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' % (StepSize, Tol))'], {}), '(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' %\n (StepSize, Tol))\n', (16523, 16596), False, 'import OpenSeesAPI\n'), ((16611, 16641), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (16636, 16641), False, 'import OpenSeesAPI\n'), ((16664, 16752), 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', (['ControlNode', '(1)', 'StepSize'], {}), '(ControlNode, 1,\n StepSize)\n', (16722, 16752), False, 'import OpenSeesAPI\n'), ((16771, 16815), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (16796, 16815), False, 'import OpenSeesAPI\n'), ((16837, 16897), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... 
\\""""'], {}), '(\'puts "Trying KrylovNewton ... "\')\n', (16862, 16897), False, 'import OpenSeesAPI\n'), ((16919, 16969), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', '(1000)', '(0)'], {}), '(Tol, 1000, 0)\n', (16955, 16969), False, 'import OpenSeesAPI\n'), ((16989, 17034), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (17032, 17034), False, 'import OpenSeesAPI\n'), ((17056, 17103), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (17081, 17103), False, 'import OpenSeesAPI\n'), ((17125, 17155), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (17150, 17155), False, 'import OpenSeesAPI\n'), ((17314, 17358), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (17339, 17358), False, 'import OpenSeesAPI\n'), ((17380, 17478), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' % (StepSize, Tol))'], {}), '(\'puts "Trying Smaller Step: %f and Tol: %f ... "\' %\n (StepSize, Tol))\n', (17405, 17478), False, 'import OpenSeesAPI\n'), ((17493, 17523), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (17518, 17523), False, 'import OpenSeesAPI\n'), ((17546, 17634), 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', 'OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl', (['ControlNode', '(1)', 'StepSize'], {}), '(ControlNode, 1,\n StepSize)\n', (17604, 17634), False, 'import OpenSeesAPI\n'), ((17653, 17697), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (17678, 17697), False, 'import OpenSeesAPI\n'), ((17719, 17779), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... 
\\""""'], {}), '(\'puts "Trying KrylovNewton ... "\')\n', (17744, 17779), False, 'import OpenSeesAPI\n'), ((17801, 17863), 'OpenSeesAPI.Analysis.Test.NormDispIncr', 'OpenSeesAPI.Analysis.Test.NormDispIncr', (['Tol', 'NoOfIterations', '(2)'], {}), '(Tol, NoOfIterations, 2)\n', (17839, 17863), False, 'import OpenSeesAPI\n'), ((17883, 17928), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (17926, 17928), False, 'import OpenSeesAPI\n'), ((17950, 17997), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (17975, 17997), False, 'import OpenSeesAPI\n'), ((18019, 18049), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (18044, 18049), False, 'import OpenSeesAPI\n'), ((18161, 18205), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (18186, 18205), False, 'import OpenSeesAPI\n'), ((18227, 18287), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying KrylovNewton ... \\""""'], {}), '(\'puts "Trying KrylovNewton ... 
"\')\n', (18252, 18287), False, 'import OpenSeesAPI\n'), ((18309, 18359), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', 'Iter', '(0)'], {}), '(Tol, Iter, 0)\n', (18345, 18359), False, 'import OpenSeesAPI\n'), ((18379, 18424), 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', 'OpenSeesAPI.Analysis.Algorithm.KrylovNewton', ([], {}), '()\n', (18422, 18424), False, 'import OpenSeesAPI\n'), ((18446, 18493), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (18471, 18493), False, 'import OpenSeesAPI\n'), ((18515, 18545), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (18540, 18545), False, 'import OpenSeesAPI\n'), ((18568, 18612), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (18593, 18612), False, 'import OpenSeesAPI\n'), ((18634, 18700), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search ... \\""""'], {}), '(\'puts "Trying Newton Line Search ... 
"\')\n', (18659, 18700), False, 'import OpenSeesAPI\n'), ((18722, 18772), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', 'Iter', '(0)'], {}), '(Tol, Iter, 0)\n', (18758, 18772), False, 'import OpenSeesAPI\n'), ((18792, 18854), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', ([], {'Tolerance': '(0.8)'}), '(Tolerance=0.8)\n', (18839, 18854), False, 'import OpenSeesAPI\n'), ((18876, 18923), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (18901, 18923), False, 'import OpenSeesAPI\n'), ((18945, 18975), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (18970, 18975), False, 'import OpenSeesAPI\n'), ((18998, 19042), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (19023, 19042), False, 'import OpenSeesAPI\n'), ((19064, 19140), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search BiSection ... \\""""'], {}), '(\'puts "Trying Newton Line Search BiSection ... 
"\')\n', (19089, 19140), False, 'import OpenSeesAPI\n'), ((19162, 19212), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', 'Iter', '(0)'], {}), '(Tol, Iter, 0)\n', (19198, 19212), False, 'import OpenSeesAPI\n'), ((19232, 19292), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Bisection"""'], {}), "('Bisection')\n", (19279, 19292), False, 'import OpenSeesAPI\n'), ((19314, 19361), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (19339, 19361), False, 'import OpenSeesAPI\n'), ((19383, 19413), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (19408, 19413), False, 'import OpenSeesAPI\n'), ((19436, 19480), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (19461, 19480), False, 'import OpenSeesAPI\n'), ((19502, 19574), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search Secant... \\""""'], {}), '(\'puts "Trying Newton Line Search Secant... 
"\')\n', (19527, 19574), False, 'import OpenSeesAPI\n'), ((19596, 19646), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', 'Iter', '(0)'], {}), '(Tol, Iter, 0)\n', (19632, 19646), False, 'import OpenSeesAPI\n'), ((19666, 19723), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""Secant"""'], {}), "('Secant')\n", (19713, 19723), False, 'import OpenSeesAPI\n'), ((19745, 19792), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (19770, 19792), False, 'import OpenSeesAPI\n'), ((19814, 19844), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (19839, 19844), False, 'import OpenSeesAPI\n'), ((19867, 19911), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""if {$ok != 0} {"""'], {}), "('if {$ok != 0} {')\n", (19892, 19911), False, 'import OpenSeesAPI\n'), ((19933, 20011), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""puts "Trying Newton Line Search RegulaFalsi ... \\""""'], {}), '(\'puts "Trying Newton Line Search RegulaFalsi ... "\')\n', (19958, 20011), False, 'import OpenSeesAPI\n'), ((20033, 20083), 'OpenSeesAPI.Analysis.Test.EnergyIncr', 'OpenSeesAPI.Analysis.Test.EnergyIncr', (['Tol', 'Iter', '(0)'], {}), '(Tol, Iter, 0)\n', (20069, 20083), False, 'import OpenSeesAPI\n'), ((20103, 20165), 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', 'OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch', (['"""RegulaFalsi"""'], {}), "('RegulaFalsi')\n", (20150, 20165), False, 'import OpenSeesAPI\n'), ((20187, 20234), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""set ok [analyze 1]"""'], {}), "('set ok [analyze 1]')\n", (20212, 20234), False, 'import OpenSeesAPI\n'), ((20256, 20286), 'OpenSeesAPI.TCL.TCLScript', 'OpenSeesAPI.TCL.TCLScript', (['"""}"""'], {}), "('}')\n", (20281, 20286), False, 'import OpenSeesAPI\n')] |
socialpoint-labs/unity-yaml-parser | unityparser/commands.py | 91c175140ed32aed301bc34d4311f370da69a8ba | import re
from argparse import ArgumentParser
from multiprocessing import Pool, Manager, Process
from pathlib import Path
from .utils import UnityDocument
YAML_HEADER = '%YAML'
class UnityProjectTester:
"""
Class to run tests on a given Unity project folder
"""
AVAILABLE_COMMANDS = ('test_no_yaml_is_modified',)
def __init__(self):
self.options = None
def run(self):
top_parser = ArgumentParser()
subparser = top_parser.add_subparsers()
subparser.required = True
for cmd in UnityProjectTester.AVAILABLE_COMMANDS:
fn = getattr(self, cmd)
parser = subparser.add_parser(cmd, help=fn.__doc__)
parser.set_defaults(func=fn)
top_parser.add_argument('project_path', help='Path to the Unity project folder')
top_parser.add_argument('--exclude',
help='Exclude regexp when searching project files. Can be specified multiple times.',
default=None,
action='append')
top_parser.add_argument('--keep-changes',
help='If a file changes after serialization, do not revert the changes.',
default=False,
action='store_true')
top_parser.add_argument('--dry-run',
help='Dont\'t modify.',
default=False,
action='store_true')
try:
self.options = top_parser.parse_args()
except TypeError:
top_parser.print_help()
return 2
# run given function
self.options.func()
def test_no_yaml_is_modified(self):
"""
Recurse the whole project folder looking for '.asset' files, load and save them all, and check that
there are no modifications
"""
if self.options.dry_run:
print("Dry-run mode enabled: YAMLs won't be dumped.")
if self.options.keep_changes:
print("Keep changes mode will not have any effect during dry run.")
elif self.options.keep_changes:
print("Keep changes mode enabled: Changes to files will be kept.")
project_path = Path(self.options.project_path)
asset_file_paths = [p for p in project_path.rglob('*.asset')]
print("Found {} '.asset' files".format(len(asset_file_paths)))
def is_path_included(path):
# compare regexp against absolute path
return not any(rexp.search(str(path.resolve())) for rexp in rexps)
if self.options.exclude is not None:
rexps = [re.compile(rexp) for rexp in self.options.exclude]
valid_file_paths = [p for p in filter(is_path_included, asset_file_paths)]
print("Excluded {} '.asset' files".format(len(asset_file_paths) - len(valid_file_paths)))
else:
valid_file_paths = asset_file_paths
file_results = []
with Manager() as manager:
print_queue = manager.Queue()
diff_list = manager.list()
queue_process = Process(target=UnityProjectTester.read_output, args=(print_queue,))
queue_process.start()
with Pool() as pool:
for f in valid_file_paths:
async_res = pool.apply_async(UnityProjectTester.open_and_save,
(f, print_queue, diff_list, self.options.keep_changes,
self.options.dry_run))
file_results.append((f, async_res))
pool.close()
pool.join()
# signal end of queue with None token
print_queue.put(None)
queue_process.join()
error_results = list(filter(lambda r: not r[1].successful(), file_results))
if len(error_results):
# raise the first exception
file_path, result = error_results[0]
print("Python process evaluating file {} failed with the following exception:".format(
file_path.resolve()), flush=True)
result.get()
if len(diff_list):
print("{} files are different now:".format(len(diff_list)))
print('\n'.join([str(f.resolve()) for f in diff_list]))
@staticmethod
def read_output(print_queue):
msg = print_queue.get()
while msg is not None:
print(msg, flush=True)
msg = print_queue.get()
@staticmethod
def open_and_save(asset_file_path, print_queue, diff_list, keep_changes=False, dry_run=False):
# check YAML version header, save original content
with open(str(asset_file_path), 'rb') as fp:
header = fp.read(len(YAML_HEADER))
try:
is_yaml_file = header.decode('utf-8') == YAML_HEADER
except UnicodeDecodeError:
is_yaml_file = False
finally:
if not is_yaml_file:
print_queue.put("Ignoring non-yaml file {}".format(asset_file_path))
return
else:
fp.seek(0)
print_queue.put("Processing {}".format(asset_file_path))
a_file_content = fp.read()
doc = UnityDocument.load_yaml(str(asset_file_path))
if dry_run:
return
try:
doc.dump_yaml()
with open(str(asset_file_path), 'rb') as fp:
b_file_content = fp.read()
# compare
if a_file_content != b_file_content:
diff_list.append(asset_file_path)
if not keep_changes:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
except Exception:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
raise
if __name__ == '__main__':
# None is considered successful
code = UnityProjectTester().run() or 0
exit(code)
| [((427, 443), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (441, 443), False, 'from argparse import ArgumentParser\n'), ((2309, 2340), 'pathlib.Path', 'Path', (['self.options.project_path'], {}), '(self.options.project_path)\n', (2313, 2340), False, 'from pathlib import Path\n'), ((3058, 3067), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (3065, 3067), False, 'from multiprocessing import Pool, Manager, Process\n'), ((3189, 3256), 'multiprocessing.Process', 'Process', ([], {'target': 'UnityProjectTester.read_output', 'args': '(print_queue,)'}), '(target=UnityProjectTester.read_output, args=(print_queue,))\n', (3196, 3256), False, 'from multiprocessing import Pool, Manager, Process\n'), ((2716, 2732), 're.compile', 're.compile', (['rexp'], {}), '(rexp)\n', (2726, 2732), False, 'import re\n'), ((3308, 3314), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (3312, 3314), False, 'from multiprocessing import Pool, Manager, Process\n')] |
kaizhengny/LeetCode | DP/Leetcode 221. Maximal Square.py | 67d64536ab80f4966699fe7460d165f2a98d6a82 | class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
dp = [[0]*n for _ in range(m)]
res = 0
for i in range(m):
dp[i][0] = int(matrix[i][0])
for j in range(n):
dp[0][j] = int(matrix[0][j])
for i in range(1, m):
for j in range(1, n):
if matrix[i][j] == '1':
dp[i][j] = min(dp[i-1][j],dp[i-1][j-1],dp[i][j-1])+1
res = max(res, dp[i][j])
return res**2 | [] |
Polyconseil/dokang | dokang/harvesters/__init__.py | b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08 | # -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.
import hashlib
import json
import logging
import os
import re
from .html import html_config, HtmlHarvester # pylint: disable=unused-import
from .sphinx import ( # pylint: disable=unused-import
sphinx_config, sphinx_rtd_config,
SphinxHarvester, ReadTheDocsSphinxHarvester
)
logger = logging.getLogger(__name__)
def _must_process_path(path, include, exclude):
for exp in include:
if exp.match(path):
return True
for exp in exclude:
if exp.match(path):
return False
return True
def _compute_hash(path):
h = hashlib.md5()
with open(path, 'rb') as fp:
while 1:
buff = fp.read(8192)
if not buff:
break
h.update(buff)
return h.hexdigest()
def harvest_set(base_dir, doc_set, config, hashes, force):
"""Harvest a document set and return documents as dictionaries.
``config`` is the harvester configuration. It should contain a key
for each supported file extensions. ``hashes`` is a dictionary
that links the path of each indexed file to its hash. It is used
to decide whether the document should be indexed again. ``force``
indicates whether to reindex a document even if it has not ben
modified since the last indexation.
This function is a generator. It yields dictionaries. Each
dictionary should represent a document and contain the following
keys in addition to the keys returned by the harvester itself.
Each text-like value should be a string (in Python 3) or a unicode
object (in Python 2).
path
The path of the document relative to the root of the document
set.
set
The id of the document set. It should be ``doc_set``.
"""
config_copy = config.copy()
include = [re.compile(exp) for exp in config_copy.pop('include') or ()]
exclude = [re.compile(exp) for exp in config_copy.pop('exclude') or ()]
extensions = config_copy
for dir_path, _dir_names, file_names in os.walk(base_dir):
for filename in file_names:
path = os.path.join(dir_path, filename)
relative_path = os.path.relpath(path, base_dir)
if not _must_process_path(relative_path, include, exclude):
logger.debug('Excluded file "%s": include/exclude rules.', relative_path)
continue
_, extension = os.path.splitext(filename)
extension = extension.lstrip('.') # remove leading dot
harvester_class = extensions.get(extension)
if harvester_class is None:
logger.debug('Excluded file "%s": no harvester found for %s.', relative_path, extension)
continue
current_hash = _compute_hash(path)
indexed_hash = hashes.get(relative_path)
if not force and (indexed_hash == current_hash):
logger.debug('Excluded file: "%s": not modified since last indexation.', relative_path)
continue
try:
logger.debug('Indexing file "%s"', relative_path)
doc = harvester_class().harvest_file(path)
except Exception: # pylint: disable=broad-except
logger.exception("Could not index document %s", path)
else:
if doc:
if relative_path == 'index.html':
with open(os.path.join(base_dir, '.dokang'), 'w') as fp:
json.dump({'title': doc['title']}, fp)
doc['path'] = relative_path
doc['set'] = doc_set
doc['hash'] = current_hash
yield doc
| [((374, 401), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (391, 401), False, 'import logging\n'), ((656, 669), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (667, 669), False, 'import hashlib\n'), ((2091, 2108), 'os.walk', 'os.walk', (['base_dir'], {}), '(base_dir)\n', (2098, 2108), False, 'import os\n'), ((1881, 1896), 're.compile', 're.compile', (['exp'], {}), '(exp)\n', (1891, 1896), False, 'import re\n'), ((1957, 1972), 're.compile', 're.compile', (['exp'], {}), '(exp)\n', (1967, 1972), False, 'import re\n'), ((2165, 2197), 'os.path.join', 'os.path.join', (['dir_path', 'filename'], {}), '(dir_path, filename)\n', (2177, 2197), False, 'import os\n'), ((2226, 2257), 'os.path.relpath', 'os.path.relpath', (['path', 'base_dir'], {}), '(path, base_dir)\n', (2241, 2257), False, 'import os\n'), ((2472, 2498), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2488, 2498), False, 'import os\n'), ((3562, 3600), 'json.dump', 'json.dump', (["{'title': doc['title']}", 'fp'], {}), "({'title': doc['title']}, fp)\n", (3571, 3600), False, 'import json\n'), ((3487, 3520), 'os.path.join', 'os.path.join', (['base_dir', '""".dokang"""'], {}), "(base_dir, '.dokang')\n", (3499, 3520), False, 'import os\n')] |
semccomas/string-method-gmxapi | __init__.py | fb68dce792d35df739225b1048e0816a4a61d45e | __all__ = ["stringmethod"]
| [] |
yangtao4389/pinche | carPooling/migrations/0018_auto_20190521_1651.py | 81463761058f67d47cea980f29a061b1e1b2d08a | # Generated by Django 2.0.4 on 2019-05-21 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carPooling', '0017_carpoolingrecunbook'),
]
operations = [
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_name',
field=models.CharField(max_length=128, null=True, verbose_name='真实姓名'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_phone',
field=models.CharField(db_index=True, max_length=11, verbose_name='电话号码'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_weixin_id',
field=models.CharField(db_index=True, max_length=128, null=True, verbose_name='微信id'),
),
]
| [((352, 416), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'null': '(True)', 'verbose_name': '"""真实姓名"""'}), "(max_length=128, null=True, verbose_name='真实姓名')\n", (368, 416), False, 'from django.db import migrations, models\n'), ((551, 618), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(11)', 'verbose_name': '"""电话号码"""'}), "(db_index=True, max_length=11, verbose_name='电话号码')\n", (567, 618), False, 'from django.db import migrations, models\n'), ((757, 836), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(128)', 'null': '(True)', 'verbose_name': '"""微信id"""'}), "(db_index=True, max_length=128, null=True, verbose_name='微信id')\n", (773, 836), False, 'from django.db import migrations, models\n')] |
Zer0-One/fuckbot | src/fuckbot/ticker.py | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | import discord
import logging
# IEX Cloud stock-quote endpoint; format with the ticker symbol.
TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote'
# IEXTrading favicon URL.
TRADING_API_ICON='https://iextrading.com/favicon.ico'
def ticker_embed(symbol):
    """Build a rich Discord embed for a stock ticker symbol.

    Args:
        symbol (str): Ticker symbol; upper-cased for the title.

    Returns:
        discord.Embed: Embed linking to the IEX quote endpoint,
        attributed to IEXTrading.
    """
    # str() keeps the original f-string's implicit conversion; no f-string
    # wrapper is needed just to call .upper().
    ticker = discord.Embed(
        title=str(symbol).upper(),
        type="rich",
        color=3029236,
        url=TRADING_API_URL.format(symbol),
    )
    ticker.set_author(name="IEXTrading")
    return ticker
| [] |
johny-c/minos | minos/lib/util/StateSet.py | 660e991f44118382f4a3cb7566670c4159d33fe3 | import bz2
import csv
import collections
import math
from enum import Enum
class Select(Enum):
    """Policy names for subsampling episode states per scene."""
    FIRST = 'first'  # take the first n (optionally after sorting by a field)
    RANGE_KEY = 'range_key'  # sample across the index range after sorting by a field
    RANGE_VALUE = 'range_value'  # sample across the value range of a field
class SelectPolicy:
    """Bundle of a Select policy and the optional state field it operates on."""
    def __init__(self, policy, field=None):
        # policy: a Select enum member; field: state key used for sorting/bucketing
        self.policy = policy
        self.field = field
class StateSet:
    """ Wrapper for set of episode val/test states """
    # Loads scene metadata and episode states from CSV files (optionally
    # bz2-compressed), groups states by scene, and subsamples them with the
    # configured SelectPolicy.

    def __init__(self, scenes_file=None, states_files=None,
                 scene_filter=None, episode_filter=None, max_states_per_scene=None,
                 select_policy=SelectPolicy(Select.FIRST)):
        # NOTE(review): the default SelectPolicy instance is created once at
        # class-definition time and shared by every caller that omits it —
        # confirm this is intentional.
        self.states = []
        self.scenes = []
        self.scenes_by_id = {}
        self.states_by_scene = {}
        self.select_policy = select_policy
        if scenes_file:
            self._load_scenes(scenes_file, scene_filter)
        if states_files:
            if type(states_files) is str:
                self._load_states(states_files, max_states_per_scene, episode_filter)
            elif isinstance(states_files, collections.Iterable):
                # NOTE(review): collections.Iterable was removed in Python 3.10
                # (lives in collections.abc) — verify the target Python version.
                for states_file in states_files:
                    self._load_states(states_file, max_states_per_scene, episode_filter)
            # Scene records only get their 'states' list when states were loaded
            self._embed_states_in_scenes()

    def get_splits(self, max_states_per_scene=None):
        """Get dictionary of StateSets keyed by scene 'set' i.e. dataset split"""
        scenes_by_split = {}
        for scene in self.scenes:
            scenes_by_split.setdefault(scene['set'], []).append(scene)
        state_sets_dict = {}
        for split, scenes in scenes_by_split.items():
            ss = StateSet()
            ss._populate_from_lists(scenes, self.states_by_scene, max_states_per_scene)
            state_sets_dict[split] = ss
        return state_sets_dict

    def get_scenes(self):
        """Return the list of loaded scene records."""
        return self.scenes

    def get_states(self):
        """Return the flat list of selected episode states."""
        return self.states

    def get_states_by_scene_id(self, scene_id):
        """Return the selected states for one scene (KeyError if unknown)."""
        return self.states_by_scene[scene_id]

    def _select_n_states(self, states, n):
        # Select n states from big list of states
        policy = self.select_policy.policy
        field = self.select_policy.field
        if n is not None and n < len(states):
            if policy == Select.FIRST:
                if field is not None:
                    # sort by field
                    states = sorted(states, key=lambda x: x[field])
                return states[:n]
            elif policy == Select.RANGE_KEY:
                # sort by field
                states = sorted(states, key=lambda x: x[field])
                # select by evenly dividing indices
                r = len(states)/float(n)
                selected = []
                for i in range(n):
                    # NOTE(review): the /2 biases every pick into the first half
                    # of the sorted list — confirm this matches the intended
                    # even spacing over indices.
                    si = int(math.floor(math.ceil(r*i)/2))
                    selected.append(states[si])
                return selected
            elif policy == Select.RANGE_VALUE:
                # sort by field and get range (value)
                states = sorted(states, key=lambda x: x[field])
                fmin = states[0][field]
                fmax = states[-1][field]
                # print('Range is %f to %f' % (fmin,fmax))
                # from range, divide up into n buckets
                r = (fmax-fmin)/float(n)
                buckets = []
                for i in range(n):
                    buckets.append([])
                for state in states:
                    bi = int(min(math.ceil((state[field] - fmin)/r), n-1))
                    buckets[bi].append(state)
                # make sure all buckets have something
                for i, bucket in enumerate(buckets):
                    if len(bucket) == 0:
                        # print('Nothing in bucket %d' % i)
                        # still some from other buckets
                        pi = max(i-1, 0)
                        ni = min(i+1, n-1)
                        nlen = len(buckets[ni])
                        plen = len(buckets[pi])
                        if nlen > plen:
                            # take half from bucket[ni] and put in current bucket
                            k = math.floor(nlen/2)
                            buckets[i] = buckets[ni][:k]
                            buckets[ni] = buckets[ni][k:]
                        else:
                            k = math.floor(plen/2)
                            buckets[i] = buckets[pi][:k]
                            buckets[pi] = buckets[pi][k:]
                # pick the middle element of each bucket
                selected = []
                for bucket in buckets:
                    bii = math.floor(len(bucket)/2)
                    selected.append(bucket[bii])
                return selected
            else:
                raise ValueError('Unsupported select_policy ' + policy)
        else:
            return states

    def _populate_from_lists(self, my_scenes, my_states_by_scene, max_states_per_scene):
        # Fill this StateSet from pre-loaded scene/state lists (used by get_splits)
        self.scenes = my_scenes
        for scene in my_scenes:
            scene_id = scene['id']
            self.scenes_by_id[scene_id] = scene
            if scene_id in my_states_by_scene:
                my_states = self._select_n_states(my_states_by_scene[scene_id], max_states_per_scene)
                self.states_by_scene[scene_id] = my_states
                self.states += my_states

    def _load_scenes(self, filename, scene_filter):
        # Load scene metadata CSV (bz2-compressed when the name ends in 'bz2');
        # coerce numeric columns and drop rows rejected by scene_filter.
        with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
            reader = csv.DictReader(f)
            self.scenes = []
            for r in reader:
                for v in ['nrooms', 'nobjects', 'nlevels']:
                    if v in r:
                        r[v] = int(r[v])
                for v in ['dimX', 'dimY', 'dimZ', 'floorArea']:
                    if v in r:
                        r[v] = float(r[v])
                if scene_filter and not scene_filter(r):
                    continue
                self.scenes.append(r)
                self.scenes_by_id[r['id']] = r
        self.scenes.sort(key=lambda x: x['nobjects'])

    def _load_states(self, filename, max_states_per_scene, state_filter):
        # Load episode state rows, convert field types, group them by sceneId,
        # then subsample each scene's list with the configured policy.
        with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
            reader = csv.DictReader(f)
            all_states = [r for r in reader]
            # Convert scene state and group by sceneId
            counter = 0
            for r in all_states:
                for v in ['startX', 'startY', 'startZ', 'startAngle', 'goalX', 'goalY', 'goalZ', 'dist', 'pathDist']:
                    r[v] = float(r[v]) if v in r else None
                for v in ['episodeId', 'pathNumDoors', 'pathNumRooms', 'level']:
                    r[v] = int(r[v]) if v in r else None
                scene_id = r['sceneId']
                scene_states = self.states_by_scene.setdefault(scene_id, [])
                rec = {
                    'episode_id': counter,
                    'scene_id': r['sceneId'],
                    'room_id': r['roomId'],
                    'start': {'position': [r['startX'], r['startY'], r['startZ']], 'angle': r['startAngle']},
                    'goal': {'id': r['goalObjectId'], 'position': [r['goalX'], r['goalY'], r['goalZ']]},
                    'dist': r['dist']
                }
                for k in ['pathDist', 'pathNumRooms', 'pathRoomIds', 'pathNumDoors', 'pathDoorIds', 'level']:
                    if k in r:
                        rec[k] = r[k]
                if not state_filter or state_filter(rec):
                    scene_states.append(rec)
                # counter advances for every row, so episode_id is unique even
                # when rows are filtered out
                counter = counter + 1
        # Filter down to states per scene and create big list of all scenes
        states = []
        for scene_id, scene_states in self.states_by_scene.items():
            self.states_by_scene[scene_id] = self._select_n_states(scene_states, max_states_per_scene)
            states += self.states_by_scene[scene_id]
        self.states = states

    def _embed_states_in_scenes(self):
        # Attach each state to its scene record; drop scenes with no states.
        for state in self.states:
            scene_id = state['scene_id']
            if scene_id in self.scenes_by_id:
                self.scenes_by_id[scene_id].setdefault('states', []).append(state)
        scenes_with_no_states = []
        for i, scene in enumerate(self.scenes):
            if 'states' not in scene or len(scene['states']) == 0:
                scenes_with_no_states.append(scene['id'])
                del self.scenes_by_id[scene['id']]
        self.scenes = [s for s in self.scenes if s['id'] not in scenes_with_no_states]
        #print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states))
def main():
    """CLI utility: load a StateSet from scene/state CSVs and print each state."""
    import argparse
    # Argument processing
    parser = argparse.ArgumentParser(description='Load state set')
    parser.add_argument('-n', '--limit',
                        type=int,
                        help='Number of states per scene')
    # NOTE(review): the --select help string looks copy-pasted from --limit;
    # it describes the wrong option.
    parser.add_argument('--select',
                        default=Select.FIRST,
                        type=Select,
                        help='Number of states per scene')
    parser.add_argument('--field',
                        default=None,
                        help='Field to use for selection')
    parser.add_argument('--scenes',
                        type=str,
                        default=None,
                        help='Scenes file to load')
    parser.add_argument('input',
                        help='Input file to load')
    args = parser.parse_args()
    state_set = StateSet(scenes_file=args.scenes,
                         states_files=args.input,
                         max_states_per_scene=args.limit,
                         select_policy=SelectPolicy(args.select, args.field))
    for state in state_set.states:
        print(state)
if __name__ == "__main__":
    # Script entry point
    main()
| [((8663, 8716), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Load state set"""'}), "(description='Load state set')\n", (8686, 8716), False, 'import argparse\n'), ((5410, 5427), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (5424, 5427), False, 'import csv\n'), ((6173, 6190), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (6187, 6190), False, 'import csv\n'), ((5310, 5334), 'bz2.open', 'bz2.open', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", (5318, 5334), False, 'import bz2\n'), ((6073, 6097), 'bz2.open', 'bz2.open', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", (6081, 6097), False, 'import bz2\n'), ((2736, 2752), 'math.ceil', 'math.ceil', (['(r * i)'], {}), '(r * i)\n', (2745, 2752), False, 'import math\n'), ((3409, 3445), 'math.ceil', 'math.ceil', (['((state[field] - fmin) / r)'], {}), '((state[field] - fmin) / r)\n', (3418, 3445), False, 'import math\n'), ((4096, 4116), 'math.floor', 'math.floor', (['(nlen / 2)'], {}), '(nlen / 2)\n', (4106, 4116), False, 'import math\n'), ((4292, 4312), 'math.floor', 'math.floor', (['(plen / 2)'], {}), '(plen / 2)\n', (4302, 4312), False, 'import math\n')] |
pmatigakis/pagetags | pagetags/configuration/development.py | 5e81d01493548edc2677453819c32de3cf75d159 | DEBUG = True
TESTING = False
| [] |
SvenMarcus/hpc-rocket | hpcrocket/pyfilesystem/factory.py | b28917e7afe6e2e839d1ae58f2e21fba6e3eb61c | from hpcrocket.core.filesystem import Filesystem, FilesystemFactory
from hpcrocket.core.launchoptions import Options
from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem
from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem
class PyFilesystemFactory(FilesystemFactory):
    """Builds local and SSH Filesystem instances from the given Options."""

    def __init__(self, options: Options) -> None:
        self._options = options

    def create_local_filesystem(self) -> Filesystem:
        # Local filesystem rooted at the current working directory
        return LocalFilesystem(".")

    def create_ssh_filesystem(self) -> Filesystem:
        # Connection settings and proxy-jump hops come from the options object
        connection = self._options.connection
        proxyjumps = self._options.proxyjumps
        return SSHFilesystem(connection, proxyjumps)
| [((447, 467), 'hpcrocket.pyfilesystem.localfilesystem.LocalFilesystem', 'LocalFilesystem', (['"""."""'], {}), "('.')\n", (462, 467), False, 'from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem\n'), ((627, 664), 'hpcrocket.pyfilesystem.sshfilesystem.SSHFilesystem', 'SSHFilesystem', (['connection', 'proxyjumps'], {}), '(connection, proxyjumps)\n', (640, 664), False, 'from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem\n')] |
vincewhite/cassandra-dtest | cqlsh_tests/cqlsh_tools.py | a01dce6af73a8656e8740227a811fe63025fb3f4 | import csv
import random
import cassandra
from cassandra.cluster import ResultSet
from typing import List
class DummyColorMap(object):
    """Color-map stand-in that renders every lookup as an empty string."""
    def __getitem__(self, *args):
        return ''
def csv_rows(filename, delimiter=None):
    """
    Given a filename, opens a csv file and yields it line by line.

    Args:
        filename: path to the CSV file
        delimiter: optional field delimiter; csv default (',') when None

    Yields:
        list[str]: the fields of each row
    """
    reader_opts = {}
    if delimiter is not None:
        reader_opts['delimiter'] = delimiter
    # The csv module requires text mode on Python 3 (newline='' per the docs);
    # the previous 'rb' mode made csv.reader fail on bytes input.
    with open(filename, newline='') as csvfile:
        for row in csv.reader(csvfile, **reader_opts):
            yield row
def assert_csvs_items_equal(filename1, filename2):
    """Assert that the two files contain identical lines, in order."""
    with open(filename1, 'r') as left, open(filename2, 'r') as right:
        assert left.readlines() == right.readlines()
def random_list(gen=None, n=None):
    """Build a list by calling `gen` repeatedly.

    gen defaults to a uniform int in [-1000, 1000]; the length defaults to a
    uniform int in [1, 5] when n is not given.
    """
    if gen is None:
        def gen():
            return random.randint(-1000, 1000)
    count = random.randint(1, 5) if n is None else n
    return [gen() for _ in range(count)]
return [gen() for _ in range(length())]
def write_rows_to_csv(filename, data):
    """Write an iterable of row sequences to `filename` as CSV.

    Args:
        filename: destination path (overwritten if it exists)
        data: iterable of row sequences
    """
    # Text mode with newline='' is what the csv module expects on Python 3;
    # the previous 'wb' mode made csv.writer raise a bytes/str TypeError.
    # The dangling `csvfile.close` (no call) is gone: the with-block closes it.
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(data)
def deserialize_date_fallback_int(byts, protocol_version):
    """Deserialize a Cassandra timestamp; fall back to the raw int on overflow.

    byts: packed 64-bit millisecond timestamp. protocol_version is unused here
    but kept to match the deserializer signature.
    """
    timestamp_ms = cassandra.marshal.int64_unpack(byts)
    try:
        return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0)
    except OverflowError:
        # Out of datetime range: return the millisecond value unchanged
        return timestamp_ms
def monkeypatch_driver():
    """
    Monkeypatches the `cassandra` driver module in the same way
    that clqsh does. Returns a dictionary containing the original values of
    the monkeypatched names.
    """
    # Remember the originals so unmonkeypatch_driver() can restore them
    cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize,
             'DateType_deserialize': cassandra.cqltypes.DateType.deserialize,
             'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values}

    # Blobs come back as bytearrays; dates fall back to ints on overflow
    cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
    cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
    cassandra.cqltypes.CassandraType.support_empty_values = True

    # Newer driver versions have a C-level deserializer that must be disabled
    if hasattr(cassandra, 'deserializers'):
        cache['DesDateType'] = cassandra.deserializers.DesDateType
        del cassandra.deserializers.DesDateType

    return cache
def unmonkeypatch_driver(cache):
    """
    Given a dictionary that was used to cache parts of `cassandra` for
    monkeypatching, restore those values to the `cassandra` module.
    """
    cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize'])
    cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize'])
    cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values']

    # Restore the C-level deserializer if it was removed by monkeypatch_driver
    if hasattr(cassandra, 'deserializers'):
        cassandra.deserializers.DesDateType = cache['DesDateType']
def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None:
    """Assert every (a, b) tuple in `expected` appears as a row in `got`.

    Linear scan per tuple, so this is O(n*m) — intentionally capped at 1000
    expected tuples to keep it from being misused on large result sets.

    :param got: ResultSet, expect schema of [a, b]
    :param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet
    """
    assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.'

    # Quick reject: differing row counts can never match.
    assert len(got.current_rows) == len(expected)

    for t in expected:
        assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t)
        found = any(row.a == t[0] and row.b == t[1] for row in got.current_rows)
        assert found, 'Failed to find expected row: {}'.format(t)
| [((1287, 1323), 'cassandra.marshal.int64_unpack', 'cassandra.marshal.int64_unpack', (['byts'], {}), '(byts)\n', (1317, 1323), False, 'import cassandra\n'), ((474, 508), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile, **reader_opts)\n', (484, 508), False, 'import csv\n'), ((1107, 1126), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1117, 1126), False, 'import csv\n'), ((1348, 1409), 'cassandra.util.datetime_from_timestamp', 'cassandra.util.datetime_from_timestamp', (['(timestamp_ms / 1000.0)'], {}), '(timestamp_ms / 1000.0)\n', (1386, 1409), False, 'import cassandra\n'), ((801, 828), 'random.randint', 'random.randint', (['(-1000)', '(1000)'], {}), '(-1000, 1000)\n', (815, 828), False, 'import random\n'), ((888, 908), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (902, 908), False, 'import random\n')] |
LastRemote/sagemaker-python-sdk | tests/unit/sagemaker/tensorflow/test_estimator_init.py | fddf29d9e4383cd3f939253eef47ee79a464dd37 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
# Region assigned to the mocked SageMaker session.
REGION = "us-west-2"
# Sample environment variables passed through to the TensorFlow estimator.
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
@pytest.fixture()
def sagemaker_session():
    """Mocked SageMaker session pinned to a fixed AWS region."""
    return Mock(name="sagemaker_session", boto_region_name=REGION)
def _build_tf(sagemaker_session, **kwargs):
    """Build a TensorFlow estimator with common test defaults.

    Extra keyword arguments (framework_version, py_version, environment, ...)
    are forwarded to the TensorFlow constructor unchanged.
    """
    return TensorFlow(
        sagemaker_session=sagemaker_session,
        entry_point="dummy.py",
        role="dummy-role",
        instance_count=1,
        instance_type="ml.c4.xlarge",
        **kwargs,
    )
@patch("sagemaker.fw_utils.python_deprecation_warning")
def test_estimator_py2_deprecation_warning(warning, sagemaker_session):
    """Building a py2 estimator on TF 2.1.1 emits the deprecation warning."""
    estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2")
    assert estimator.py_version == "py2"
    warning.assert_called_with("tensorflow", "2.1.1")
def test_py2_version_deprecated(sagemaker_session):
    """py2 is rejected for TF versions above 2.1.1."""
    with pytest.raises(AttributeError) as e:
        _build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2")

    msg = (
        "Python 2 containers are only available with 2.1.1 and lower versions. "
        "Please use a Python 3 container."
    )
    assert msg in str(e.value)
def test_py2_version_is_not_deprecated(sagemaker_session):
    """py2 is still accepted for TF 1.15.0 and 2.0.0."""
    estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
    assert estimator.py_version == "py2"
    estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
    assert estimator.py_version == "py2"
def test_framework_name(sagemaker_session):
    """The estimator reports 'tensorflow' as its framework name."""
    tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3")
    assert tf._framework_name == "tensorflow"
def test_tf_add_environment_variables(sagemaker_session):
    """Environment variables passed in are stored on the estimator."""
    tf = _build_tf(
        sagemaker_session,
        framework_version="1.15.2",
        py_version="py3",
        environment=ENV_INPUT,
    )
    assert tf.environment == ENV_INPUT
def test_tf_miss_environment_variables(sagemaker_session):
    """Passing environment=None leaves the estimator without environment vars."""
    tf = _build_tf(
        sagemaker_session,
        framework_version="1.15.2",
        py_version="py3",
        environment=None,
    )
    assert not tf.environment
def test_enable_sm_metrics(sagemaker_session):
    """enable_sagemaker_metrics=True is preserved on the estimator."""
    tf = _build_tf(
        sagemaker_session,
        framework_version="1.15.2",
        py_version="py3",
        enable_sagemaker_metrics=True,
    )
    assert tf.enable_sagemaker_metrics
def test_disable_sm_metrics(sagemaker_session):
    """enable_sagemaker_metrics=False is preserved on the estimator."""
    tf = _build_tf(
        sagemaker_session,
        framework_version="1.15.2",
        py_version="py3",
        enable_sagemaker_metrics=False,
    )
    assert not tf.enable_sagemaker_metrics
def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15(
    sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
    """SageMaker metrics default to None (off) for TF <= 1.14."""
    # Only applicable to old framework versions; skip otherwise
    if version.Version(tensorflow_training_version) > version.Version("1.14"):
        pytest.skip("This test is for TF 1.14 and lower.")

    tf = _build_tf(
        sagemaker_session,
        framework_version=tensorflow_training_version,
        py_version=tensorflow_training_py_version,
        image_uri="old-image",
    )
    assert tf.enable_sagemaker_metrics is None
def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15(
    sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
    """SageMaker metrics default to enabled for TF >= 1.15."""
    # Only applicable to newer framework versions; skip otherwise
    if version.Version(tensorflow_training_version) < version.Version("1.15"):
        pytest.skip("This test is for TF 1.15 and higher.")

    tf = _build_tf(
        sagemaker_session,
        framework_version=tensorflow_training_version,
        py_version=tensorflow_training_py_version,
    )
    assert tf.enable_sagemaker_metrics
def test_require_image_uri_if_fw_ver_is_less_than_1_11(
    sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
    """TF <= 1.10 (legacy mode) requires an explicit image_uri and fails without one."""
    # Only applicable to legacy framework versions; skip otherwise
    if version.Version(tensorflow_training_version) > version.Version("1.10"):
        pytest.skip("This test is for TF 1.10 and lower.")

    with pytest.raises(ValueError) as e:
        _build_tf(
            sagemaker_session,
            framework_version=tensorflow_training_version,
            py_version=tensorflow_training_py_version,
        )

    expected_msg = (
        "TF {version} supports only legacy mode. Please supply the image URI directly with "
        "'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"
        "sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "
        "legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "
        "make sure to pass them directly as hyperparameters instead."
    ).format(version=tensorflow_training_version, region=REGION)

    assert expected_msg in str(e.value)
| [((829, 845), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (843, 845), False, 'import pytest\n'), ((1202, 1256), 'mock.patch', 'patch', (['"""sagemaker.fw_utils.python_deprecation_warning"""'], {}), "('sagemaker.fw_utils.python_deprecation_warning')\n", (1207, 1256), False, 'from mock import Mock, patch\n'), ((882, 937), 'mock.Mock', 'Mock', ([], {'name': '"""sagemaker_session"""', 'boto_region_name': 'REGION'}), "(name='sagemaker_session', boto_region_name=REGION)\n", (886, 937), False, 'from mock import Mock, patch\n'), ((995, 1152), 'sagemaker.tensorflow.TensorFlow', 'TensorFlow', ([], {'sagemaker_session': 'sagemaker_session', 'entry_point': '"""dummy.py"""', 'role': '"""dummy-role"""', 'instance_count': '(1)', 'instance_type': '"""ml.c4.xlarge"""'}), "(sagemaker_session=sagemaker_session, entry_point='dummy.py',\n role='dummy-role', instance_count=1, instance_type='ml.c4.xlarge', **kwargs\n )\n", (1005, 1152), False, 'from sagemaker.tensorflow import TensorFlow\n'), ((1578, 1607), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1591, 1607), False, 'import pytest\n'), ((3489, 3533), 'packaging.version.Version', 'version.Version', (['tensorflow_training_version'], {}), '(tensorflow_training_version)\n', (3504, 3533), False, 'from packaging import version\n'), ((3536, 3559), 'packaging.version.Version', 'version.Version', (['"""1.14"""'], {}), "('1.14')\n", (3551, 3559), False, 'from packaging import version\n'), ((3569, 3619), 'pytest.skip', 'pytest.skip', (['"""This test is for TF 1.14 and lower."""'], {}), "('This test is for TF 1.14 and lower.')\n", (3580, 3619), False, 'import pytest\n'), ((4008, 4052), 'packaging.version.Version', 'version.Version', (['tensorflow_training_version'], {}), '(tensorflow_training_version)\n', (4023, 4052), False, 'from packaging import version\n'), ((4055, 4078), 'packaging.version.Version', 'version.Version', (['"""1.15"""'], {}), "('1.15')\n", (4070, 4078), False, 'from packaging 
import version\n'), ((4088, 4139), 'pytest.skip', 'pytest.skip', (['"""This test is for TF 1.15 and higher."""'], {}), "('This test is for TF 1.15 and higher.')\n", (4099, 4139), False, 'import pytest\n'), ((4490, 4534), 'packaging.version.Version', 'version.Version', (['tensorflow_training_version'], {}), '(tensorflow_training_version)\n', (4505, 4534), False, 'from packaging import version\n'), ((4537, 4560), 'packaging.version.Version', 'version.Version', (['"""1.10"""'], {}), "('1.10')\n", (4552, 4560), False, 'from packaging import version\n'), ((4570, 4620), 'pytest.skip', 'pytest.skip', (['"""This test is for TF 1.10 and lower."""'], {}), "('This test is for TF 1.10 and lower.')\n", (4581, 4620), False, 'import pytest\n'), ((4631, 4656), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4644, 4656), False, 'import pytest\n')] |
sofwerx/mycroft-articlekeyword-skill | testing.py | 7cab109db512d3a6465db241b18018e9415f4a9f |
import subprocess
# Run the keyword extractor on aih.txt, asking for the top 5 keywords,
# and capture its stdout.
proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt', '5'],
                        stdout=subprocess.PIPE)

# stdout of a PIPE is bytes; decode once so the split fields are plain str
# (previously str(divide[1]) produced "b'word'" artifacts under Python 3).
text = proc.stdout.read().decode()
rows = text.splitlines()

# Build an enumerated "1 word 2 word ..." summary from rows that have at
# least two whitespace-separated fields (index 1 is the keyword column).
count = 0
s = ""
for row in rows:
    divide = row.split()
    if len(divide) > 1:
        count = count + 1
        s += str(count)
        s += " "
        s += divide[1]
        s += " "

print(s)
# with open(path + 'out.csv', 'r') as content_file:
# text = content_file.read()
# self.speak_dialog("bitcoin.price", data={'price': str(text)})
#file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv'
#wordCount = 10
#
# text = Path(file_path).read_text()
# #print(exit_code) | [((27, 123), 'subprocess.Popen', 'subprocess.Popen', (["['python3', 'articlekeywords.py', 'aih.txt', '5']"], {'stdout': 'subprocess.PIPE'}), "(['python3', 'articlekeywords.py', 'aih.txt', '5'], stdout=\n subprocess.PIPE)\n", (43, 123), False, 'import subprocess\n')] |
mirrorcoloured/slcypi | slcyGeneral.py | c47975b3523f770d12a521c82e2dfca181e3f35b | # Python 2.7.1
import RPi.GPIO as GPIO
from twython import Twython
import time
import sys
import os
import pygame
# Twitter API credentials for the shared client below.
# NOTE(review): hard-coded secrets committed to source — these should be
# rotated and loaded from environment variables or a config file instead.
APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw'
APP_SECRET='ksfSVa2hxvTQKYy4UR9tjpb57CAynMJDsygz9qOyzlH24NVwpW'
OAUTH_TOKEN='794094183841566720-BagrHW91yH8C3Mdh9SOlBfpL6wrSVRW'
OAUTH_TOKEN_SECRET='d0Uucq2dkSHrFHZGLM1X8Hw05d80ajKYGl1zTRxZQSKTm'
# Module-level authenticated Twython client
applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
### GENERAL ###
def Cleanup():
    """Release all GPIO pins claimed by this process (RPi.GPIO teardown)."""
    GPIO.cleanup()
def Sleep(seconds):
    """Puts the program to sleep"""
    time.sleep(seconds)
def Alert(channel):
    """Simple alert function for testing event interrupts"""
    print('Alert on channel',channel)
def TimeString():
    """Return the current local time as 'YYYY.M.D.H.M.S' (no zero padding)."""
    t = time.localtime()
    # Join the first six struct_time fields (year..second) with dots instead
    # of the previous 11-term manual concatenation.
    return ".".join(str(part) for part in t[:6])
def LoadPins(mapping, inp):
    """Organizes an input into a pin mapping dict

    mapping <list>, ['IA','IB']
    inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2]

    Returns a dict mapping each name to its pin; an empty dict (plus an
    error message on stdout) when the input shape does not match.
    """
    if isinstance(inp, int) and len(mapping) == 1:
        return {mapping[0]: inp}
    if isinstance(inp, list) and len(mapping) == len(inp):
        # Pair names with pins positionally
        return dict(zip(mapping, inp))
    if isinstance(inp, dict):
        return inp
    print('Invalid input for pins:', inp, type(inp))
    print('Expected:', mapping)
    return {}
def BoolToSign(inp):
    """Map a boolean bit to a signed bit: 0 -> -1, 1 -> 1."""
    return 2 * inp - 1
def SignToBool(inp):
    """Map a signed bit to a boolean bit: -1 -> 0, 1 -> 1."""
    return 0.5 * (inp + 1)
### PYGAME ###
def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)):
    """Sets up a pygame window to take keyboard input
    size <tuple>, width by height
    caption <str>, window title bar
    text <str>, text to display in window, accepts \n
    background <tuple>, foreground <tuple>, (r,g,b) color
    """
    pygame.init()
    screen = pygame.display.set_mode(size,0,32)
    pygame.display.set_caption(caption)
    myfont = pygame.font.SysFont('Monospace',15)
    # Render each line of text separately so '\n' produces stacked rows
    labels = []
    lines = text.split('\n')
    for line in lines:
        labels.append(myfont.render(line,1,foreground))
    screen.fill(background)
    y = 0
    for label in labels:
        screen.blit(label, (0,y))
        y += 15  # one row per 15px, matching the font size
    pygame.display.update()
def InputLoop(eventmap):
    """Begins a pygame loop, mapping key inputs to functions
    eventmap <dict>, {pygame.K_t:myfunction}

    Blocks forever; exits the process when the window is closed.
    """
    index = 0  # NOTE(review): only used by the commented-out debug print below
    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN:
                #print("{0}: You pressed {1:c}".format ( index , event.key ))
                if event.key in eventmap:
                    eventmap[event.key]()
            elif event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
def InputLoopDemo():
    """Demo for InputLoop: binds the d/c/f keys to printing pet sounds."""
    def dog():
        print('woof')
    def cat():
        print('meow')
    def fish():
        print('blub')
    WindowSetup(caption='pet simulator',text='d for dog\nc for cat\nf for fish')
    InputLoop({pygame.K_d:dog, pygame.K_c:cat, pygame.K_f:fish})
### TWITTER ###
def Tweet(twit, statustext):
    """Post a status update, enforcing Twitter's 140-character limit.

    twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    statustext <str>, must be <= 140 characters
    """
    if len(statustext) <= 140:
        twit.update_status(status=statustext)
    else:
        print('ERROR: Character limit 140 exceeded:', len(statustext))
def TweetPicture(twit, file, statustext):
    """Tweets a message with a picture

    twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    file <str>, path and filename to picture
    statustext <str>, must be <= 140 characters
    """
    # Bug fix: the original called the undefined name `twitter` (NameError);
    # upload through the `twit` client that was passed in. The with-block also
    # closes the file handle, which was previously leaked.
    with open(file, 'rb') as photo:
        response = twit.upload_media(media=photo)
    twit.update_status(status=statustext, media_ids=[response['media_id']])
def TweetVideo(twit, file, statustext):
    """Tweets a message with a video

    twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    file <str>, path and filename to video
    statustext <str>, must be <= 140 characters
    """
    # Bug fix: the original called the undefined name `twitter` (NameError);
    # upload through the `twit` client that was passed in. The with-block also
    # closes the file handle, which was previously leaked.
    with open(file, 'rb') as video:
        response = twit.upload_video(media=video, media_type='video/mp4')
    twit.update_status(status=statustext, media_ids=[response['media_id']])
| [((362, 423), 'twython.Twython', 'Twython', (['APP_KEY', 'APP_SECRET', 'OAUTH_TOKEN', 'OAUTH_TOKEN_SECRET'], {}), '(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n', (369, 423), False, 'from twython import Twython\n'), ((461, 475), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (473, 475), True, 'import RPi.GPIO as GPIO\n'), ((537, 556), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (547, 556), False, 'import time\n'), ((739, 755), 'time.localtime', 'time.localtime', ([], {}), '()\n', (753, 755), False, 'import time\n'), ((2018, 2031), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2029, 2031), False, 'import pygame\n'), ((2045, 2081), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size', '(0)', '(32)'], {}), '(size, 0, 32)\n', (2068, 2081), False, 'import pygame\n'), ((2084, 2119), 'pygame.display.set_caption', 'pygame.display.set_caption', (['caption'], {}), '(caption)\n', (2110, 2119), False, 'import pygame\n'), ((2133, 2169), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Monospace"""', '(15)'], {}), "('Monospace', 15)\n", (2152, 2169), False, 'import pygame\n'), ((2410, 2433), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2431, 2433), False, 'import pygame\n'), ((2621, 2639), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2637, 2639), False, 'import pygame\n'), ((2936, 2949), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2947, 2949), False, 'import pygame\n'), ((2966, 2976), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2974, 2976), False, 'import sys\n')] |
PetervdPerk-NXP/pyarmnn-release | python/pyarmnn/scripts/generate_docs.py | 2008c270f7c7c84a930842c845138628c8b95713 | # Copyright © 2019 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import tarfile
import pyarmnn as ann
import shutil
from typing import List, Union
from pdoc.cli import main
package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
    """Copies one or more files to a directory, creating it if needed.

    Args:
        file_paths (Union[List[str], str]): File path or list of file paths to copy.
        target_dir_path (str): Target directory.

    Raises:
        RuntimeError: If any entry in file_paths is not an existing file.

    Returns:
        None
    """
    # Accept a single path as documented; the previous '[] + file_paths'
    # raised TypeError for the str case.
    if isinstance(file_paths, str):
        file_paths = [file_paths]
    if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
        os.makedirs(target_dir_path)

    for file_path in file_paths:
        if not (os.path.exists(file_path) and os.path.isfile(file_path)):
            raise RuntimeError('Not a file: {}'.format(file_path))
        file_name = os.path.basename(file_path)
        shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
def archive_docs(path: str, version: str):
    """Creates an archive.

    Args:
        path (str): Path which will be archived.
        version (str): Version of Arm NN.

    Returns:
        None
    """
    output_filename = f'pyarmnn_docs-{version}.tar'
    archive_path = os.path.join(package_dir, output_filename)
    with tarfile.open(archive_path, "w") as tar:
        tar.add(path)
if __name__ == "__main__":
    # Use the project README as the top-level pyarmnn docstring so pdoc
    # renders it on the package index page.
    readme_filename = os.path.join(package_dir, '..', '..', 'README.md')

    with open(readme_filename, 'r') as readme_file:
        top_level_pyarmnn_doc = ''.join(readme_file.readlines())
        ann.__doc__ = top_level_pyarmnn_doc

    # Generate the HTML docs (pdoc.cli.main reads sys.argv), then archive them
    main()

    target_path = os.path.join(package_dir, 'docs')
    archive_docs(target_path, ann.__version__)
| [((1525, 1575), 'os.path.join', 'os.path.join', (['package_dir', '""".."""', '""".."""', '"""README.md"""'], {}), "(package_dir, '..', '..', 'README.md')\n", (1537, 1575), False, 'import os\n'), ((1747, 1753), 'pdoc.cli.main', 'main', ([], {}), '()\n', (1751, 1753), False, 'from pdoc.cli import main\n'), ((1773, 1806), 'os.path.join', 'os.path.join', (['package_dir', '"""docs"""'], {}), "(package_dir, 'docs')\n", (1785, 1806), False, 'import os\n'), ((281, 307), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (297, 307), False, 'import os\n'), ((743, 771), 'os.makedirs', 'os.makedirs', (['target_dir_path'], {}), '(target_dir_path)\n', (754, 771), False, 'import os\n'), ((974, 1001), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (990, 1001), False, 'import os\n'), ((665, 696), 'os.path.exists', 'os.path.exists', (['target_dir_path'], {}), '(target_dir_path)\n', (679, 696), False, 'import os\n'), ((701, 731), 'os.path.isdir', 'os.path.isdir', (['target_dir_path'], {}), '(target_dir_path)\n', (714, 731), False, 'import os\n'), ((1390, 1432), 'os.path.join', 'os.path.join', (['package_dir', 'output_filename'], {}), '(package_dir, output_filename)\n', (1402, 1432), False, 'import os\n'), ((825, 850), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (839, 850), False, 'import os\n'), ((855, 880), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (869, 880), False, 'import os\n')] |
jeffzhengye/pylearn | tests/gejun_sum.py | a140d0fca8a371faada194cb0126192675cc2045 | __author__ = 'jeffye'
def sum_consecutives(s):
i = 1
li = []
if i < len(s):
n = 1
while s[i] != s[i + 1] and s[i] != s[i - 1]:
sum = s[i]
i = i + 1
return sum
while s[i] == s[i + 1]:
n = n + 1
sum = s[i] * n
i = i + 1
return sum
li.append(sum)
return li
def sum_consecutives_corrected(s):
start = 0
li = []
n = 1
while start < len(s):
if start == len(s) - 1: # last element
li.append(s[start])
break
elif s[start] == s[start + n]: # equal, just record the length
n += 1
else: # first not equal, sum all previous equal elements and append to li
li.append(sum(s[start: start + n]))
start += n
n = 1
return li
if __name__ == '__main__':
test_li = [-5, -5, 7, 7, 12, 0] # should return [-10,14,12,0]
print sum_consecutives_corrected(test_li)
| [] |
tscher/PySDDP | PySDDP/term.py | ece69b77c951cbb1f046ac184f6fe4fc025ad690 | class term(object):
# Dados de cadastro das usinas termeletrica (presentes no TERM.DAT)
Codigo = None
Nome = None
Potencia = None
FCMax = None
TEIF = None
IP = None
GTMin = None
# Dados Adicionais Especificados no arquivo de configuracao termica (CONFT)
Sist = None
Status = None
Classe = None
# Dados Adicionais Especificados no arquivo de classe termica (CLAST)
Custo = None
NomeClasse = None
TipoComb = None
def insere(self, custo, gmax):
self.custo = custo
self.gmax = gmax
| [] |
b2bs-team/pylint-errors | plerr/__main__.py | f1362c8afbe6b7075f805560d7699f63ad35a10b | """plerr entrypoint"""
from plerr import cli
if __name__ == '__main__':
cli.main()
| [((77, 87), 'plerr.cli.main', 'cli.main', ([], {}), '()\n', (85, 87), False, 'from plerr import cli\n')] |
CamouOkau/messenger_new_years_bot | code/send.py | 38f3c26b6c5b4dae7fe48f8b61680ec903c0deac | import sys
import time
from datetime import datetime
from bot import FbMessengerBot
if __name__ == "__main__":
if len(sys.argv) < 3:
print("No email or password provided")
else:
bot = FbMessengerBot(sys.argv[1], sys.argv[2])
with open("users.txt", "r") as file:
users = dict.fromkeys(file.read().split("\n"))
for user in users:
users[user] = bot.uid(user)
with open("message.txt", "r") as file:
message = file.read()
time_now = datetime.now()
send_time = datetime(time_now.year + 1, 1, 1)
wait_time = (send_time - time_now).total_seconds()
print("Waiting...")
time.sleep(wait_time)
for uid in users.values():
bot.send_message(message, uid)
bot.logout()
| [((210, 250), 'bot.FbMessengerBot', 'FbMessengerBot', (['sys.argv[1]', 'sys.argv[2]'], {}), '(sys.argv[1], sys.argv[2])\n', (224, 250), False, 'from bot import FbMessengerBot\n'), ((531, 545), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (543, 545), False, 'from datetime import datetime\n'), ((566, 599), 'datetime.datetime', 'datetime', (['(time_now.year + 1)', '(1)', '(1)'], {}), '(time_now.year + 1, 1, 1)\n', (574, 599), False, 'from datetime import datetime\n'), ((695, 716), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (705, 716), False, 'import time\n')] |
ghanshyammann/senlin-tempest-plugin | senlin_tempest_plugin/api/policies/test_policy_update_negative.py | 9f33bbe723eb381f93c2248a6a277efef3d92ec3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from senlin_tempest_plugin.api import base
from senlin_tempest_plugin.common import utils
class TestPolicyUpdateNegativeNotFound(base.BaseSenlinAPITest):
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5df90d82-9889-4c6f-824c-30272bcfa767')
def test_policy_update_policy_not_found(self):
ex = self.assertRaises(exceptions.NotFound,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'name': 'new-name'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"The policy '5df90d82-9889-4c6f-824c-30272bcfa767' "
"could not be found.", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('29414add-9cba-4b72-a7bb-36718671dcab')
def test_policy_update_policy_invalid_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'boo': 'foo'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'boo' was "
"unexpected)", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('bf26ed1e-1d26-4472-b4c8-0bcca1c0a838')
def test_policy_update_policy_empty_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{})
message = ex.resp_body['error']['message']
self.assertEqual(
"Malformed request data, missing 'policy' key in "
"request body.", str(message))
class TestPolicyUpdateNegativeBadRequest(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyUpdateNegativeBadRequest, self).setUp()
# Create a policy
policy_id = utils.create_a_policy(self)
self.addCleanup(utils.delete_a_policy, self, policy_id)
self.policy_id = policy_id
@decorators.attr(type=['negative'])
@decorators.idempotent_id('31242de5-55ac-4589-87a1-a9940e4beca2')
def test_policy_update_no_property_updated(self):
# No property is updated.
params = {
'policy': {}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"'name' is a required property", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d2ca7de6-0069-48c9-b3de-ee975a2428dc')
def test_policy_update_spec_not_updatable(self):
# Try to update spec of policy.
# Note: name is the only property that can be updated
# after policy is created.
params = {
'policy': {
'name': 'new-name',
'spec': {'k1': 'v1'}
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'spec' was "
"unexpected)", str(message))
| [((779, 813), 'tempest.lib.decorators.attr', 'decorators.attr', ([], {'type': "['negative']"}), "(type=['negative'])\n", (794, 813), False, 'from tempest.lib import decorators\n'), ((819, 883), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""5df90d82-9889-4c6f-824c-30272bcfa767"""'], {}), "('5df90d82-9889-4c6f-824c-30272bcfa767')\n", (843, 883), False, 'from tempest.lib import decorators\n'), ((1388, 1422), 'tempest.lib.decorators.attr', 'decorators.attr', ([], {'type': "['negative']"}), "(type=['negative'])\n", (1403, 1422), False, 'from tempest.lib import decorators\n'), ((1428, 1492), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""29414add-9cba-4b72-a7bb-36718671dcab"""'], {}), "('29414add-9cba-4b72-a7bb-36718671dcab')\n", (1452, 1492), False, 'from tempest.lib import decorators\n'), ((1989, 2023), 'tempest.lib.decorators.attr', 'decorators.attr', ([], {'type': "['negative']"}), "(type=['negative'])\n", (2004, 2023), False, 'from tempest.lib import decorators\n'), ((2029, 2093), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""bf26ed1e-1d26-4472-b4c8-0bcca1c0a838"""'], {}), "('bf26ed1e-1d26-4472-b4c8-0bcca1c0a838')\n", (2053, 2093), False, 'from tempest.lib import decorators\n'), ((2891, 2925), 'tempest.lib.decorators.attr', 'decorators.attr', ([], {'type': "['negative']"}), "(type=['negative'])\n", (2906, 2925), False, 'from tempest.lib import decorators\n'), ((2931, 2995), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', (['"""31242de5-55ac-4589-87a1-a9940e4beca2"""'], {}), "('31242de5-55ac-4589-87a1-a9940e4beca2')\n", (2955, 2995), False, 'from tempest.lib import decorators\n'), ((3511, 3545), 'tempest.lib.decorators.attr', 'decorators.attr', ([], {'type': "['negative']"}), "(type=['negative'])\n", (3526, 3545), False, 'from tempest.lib import decorators\n'), ((3551, 3615), 'tempest.lib.decorators.idempotent_id', 'decorators.idempotent_id', 
(['"""d2ca7de6-0069-48c9-b3de-ee975a2428dc"""'], {}), "('d2ca7de6-0069-48c9-b3de-ee975a2428dc')\n", (3575, 3615), False, 'from tempest.lib import decorators\n'), ((2758, 2785), 'senlin_tempest_plugin.common.utils.create_a_policy', 'utils.create_a_policy', (['self'], {}), '(self)\n', (2779, 2785), False, 'from senlin_tempest_plugin.common import utils\n')] |
hal0x2328/neo3-boa | boa3_test/test_sc/interop_test/contract/DestroyContract.py | 6825a3533384cb01660773050719402a9703065b | from boa3.builtin import public
from boa3.builtin.interop.contract import destroy_contract
@public
def Main():
destroy_contract()
| [((117, 135), 'boa3.builtin.interop.contract.destroy_contract', 'destroy_contract', ([], {}), '()\n', (133, 135), False, 'from boa3.builtin.interop.contract import destroy_contract\n')] |
qinfeng2011/wltp | tests/test_vmax.py | 317ad38fb96599a29d22e40f69b6aeb4d205611d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import functools as fnt
import logging
import random
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas import IndexSlice as _ix
from wltp import engine, vehicle, downscale, vmax
from wltp.io import gear_names, veh_names
from . import vehdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def test_v_max(h5_accdb):
from . import conftest
veh_samples = None
# DEBUG: to reduce clutter in the console.
# veh_samples = 12
# DEBUG: to study buggy cars.
# veh_samples = [76] # diff det_by_nlim
# veh_samples = [3, 21, 22, 104, ] # diff gear
# veh_samples = [38] # diff vmax order higher 1st
# veh_samples = [31] # [23]
def make_v_maxes(vehnum):
props, wot, n2vs = vehdb.load_vehicle_accdb(h5_accdb, vehnum)
wot = wot.rename({"Pwot": "p"}, axis=1)
wot["n"] = wot.index
gwots = engine.interpolate_wot_on_v_grid(wot, n2vs)
gwots = engine.calc_p_avail_in_gwots(gwots, SM=0.1)
gwots["p_resist"] = vehicle.calc_road_load_power(
gwots.index, props.f0, props.f1, props.f2
)
rec = vmax.calc_v_max(gwots)
return (props["v_max"], rec.v_max, props["gear_v_max"], rec.g_vmax, rec.wot)
def _package_wots_df(gear_wot_dfs):
assert gear_wot_dfs
## Merge all index values into the index of the 1st DF,
# or else, themerged-df contains n-gear dupes in each index-value.
#
# first_df, *rest_dfs = gear_wot_dfs.values()
# full_index = np.unique(np.hstack(df.index for df in gear_wot_dfs))
# first_df = first_df.reindex(full_index)
wots_df = pd.concat(
# [first_df] + rest_dfs,
gear_wot_dfs.values(),
axis=1,
# join="inner",
keys=gear_names(gear_wot_dfs.keys()),
names=["item", "gear"],
verify_integrity=True,
)
return wots_df
veh_nums = vehdb.all_vehnums(h5_accdb)
if not isinstance(veh_samples, (list, tuple)):
veh_samples = random.sample(veh_nums, veh_samples) if veh_samples else veh_nums
recs = [make_v_maxes(vehnum) for vehnum in veh_samples]
vehres = pd.DataFrame(
recs,
columns="vmax_accdb vmax_python gmax_accdb gmax_python wot".split(),
index=veh_names(veh_samples),
).astype({"gmax_accdb": "Int64", "gmax_python": "Int64"})
wots_df = pd.concat(
vehres["wot"].values, keys=veh_names(veh_samples), names=["vehicle"]
)
vehres = vehres.drop("wot", axis=1)
vehres["vmax_diff"] = (vehres["vmax_python"] - vehres["vmax_accdb"]).abs()
vehres["gmax_diff"] = (vehres["gmax_python"] - vehres["gmax_accdb"]).abs()
with pd.option_context(
"display.max_rows",
130,
"display.max_columns",
20,
"display.width",
120,
# "display.precision",
# 4,
# "display.chop_threshold",
# 1e-8,
"display.float_format",
"{:0.2f}".format,
):
print(
f"++ nones: {vehres.vmax_python.sum()} (out of {len(veh_samples)})"
f"\n++++\n{vehres}"
# f"\n++++\n{wots_df.sample(80, axis=0)}"
)
with pd.option_context(
"display.max_columns",
20,
"display.width",
120,
"display.float_format",
"{:0.4f}".format,
):
print(f"\n++++\n{vehres.describe().T}")
vehres = vehres.dropna(axis=1)
# npt.assert_array_equal(vmaxes["vmax_python"], vmaxes["vmax_accdb"])
aggregate_tol = 1e-4 # The digits copied from terminal.
assert (
vehres["vmax_diff"].describe()
- [125.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
< aggregate_tol
).all()
assert (
vehres["gmax_diff"].describe()
- [125.0000, 0.1040, 0.3552, 0.0000, 0.0000, 0.0000, 0.0000, 2.0000]
< aggregate_tol
).all()
assert (vehres["vmax_diff"] == 0).sum() == 125 and (
vehres["gmax_diff"] == 0
).sum() == 125
| [((564, 604), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (583, 604), False, 'import logging\n'), ((611, 638), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (628, 638), False, 'import logging\n'), ((1202, 1245), 'wltp.engine.interpolate_wot_on_v_grid', 'engine.interpolate_wot_on_v_grid', (['wot', 'n2vs'], {}), '(wot, n2vs)\n', (1234, 1245), False, 'from wltp import engine, vehicle, downscale, vmax\n'), ((1262, 1305), 'wltp.engine.calc_p_avail_in_gwots', 'engine.calc_p_avail_in_gwots', (['gwots'], {'SM': '(0.1)'}), '(gwots, SM=0.1)\n', (1290, 1305), False, 'from wltp import engine, vehicle, downscale, vmax\n'), ((1334, 1405), 'wltp.vehicle.calc_road_load_power', 'vehicle.calc_road_load_power', (['gwots.index', 'props.f0', 'props.f1', 'props.f2'], {}), '(gwots.index, props.f0, props.f1, props.f2)\n', (1362, 1405), False, 'from wltp import engine, vehicle, downscale, vmax\n'), ((1442, 1464), 'wltp.vmax.calc_v_max', 'vmax.calc_v_max', (['gwots'], {}), '(gwots)\n', (1457, 1464), False, 'from wltp import engine, vehicle, downscale, vmax\n'), ((3039, 3176), 'pandas.option_context', 'pd.option_context', (['"""display.max_rows"""', '(130)', '"""display.max_columns"""', '(20)', '"""display.width"""', '(120)', '"""display.float_format"""', '"""{:0.2f}""".format'], {}), "('display.max_rows', 130, 'display.max_columns', 20,\n 'display.width', 120, 'display.float_format', '{:0.2f}'.format)\n", (3056, 3176), True, 'import pandas as pd\n'), ((3541, 3653), 'pandas.option_context', 'pd.option_context', (['"""display.max_columns"""', '(20)', '"""display.width"""', '(120)', '"""display.float_format"""', '"""{:0.4f}""".format'], {}), "('display.max_columns', 20, 'display.width', 120,\n 'display.float_format', '{:0.4f}'.format)\n", (3558, 3653), True, 'import pandas as pd\n'), ((2373, 2409), 'random.sample', 'random.sample', (['veh_nums', 'veh_samples'], {}), '(veh_nums, veh_samples)\n', 
(2386, 2409), False, 'import random\n'), ((2783, 2805), 'wltp.io.veh_names', 'veh_names', (['veh_samples'], {}), '(veh_samples)\n', (2792, 2805), False, 'from wltp.io import gear_names, veh_names\n'), ((2636, 2658), 'wltp.io.veh_names', 'veh_names', (['veh_samples'], {}), '(veh_samples)\n', (2645, 2658), False, 'from wltp.io import gear_names, veh_names\n')] |
giuseppe/quay | util/canonicaljson.py | a1b7e4b51974edfe86f66788621011eef2667e6a | import collections
def canonicalize(json_obj, preserve_sequence_order=True):
"""
This function canonicalizes a Python object that will be serialized as JSON.
Example usage: json.dumps(canonicalize(my_obj))
Args:
json_obj (object): the Python object that will later be serialized as JSON.
Returns:
object: json_obj now sorted to its canonical form.
"""
if isinstance(json_obj, collections.MutableMapping):
sorted_obj = sorted(
{
key: canonicalize(val, preserve_sequence_order) for key, val in json_obj.items()
}.items()
)
return collections.OrderedDict(sorted_obj)
elif isinstance(json_obj, (list, tuple)):
seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
return seq if preserve_sequence_order else sorted(seq)
return json_obj
| [((636, 671), 'collections.OrderedDict', 'collections.OrderedDict', (['sorted_obj'], {}), '(sorted_obj)\n', (659, 671), False, 'import collections\n')] |
russss/datasette-geo | datasette_plugin_geo/inspect.py | d4cecc020848bbde91e9e17bf352f7c70bc3dccf | from datasette import hookimpl
from datasette.utils import detect_spatialite
from shapely import wkt
def get_spatial_tables(conn):
if not detect_spatialite(conn):
return {}
spatial_tables = {}
c = conn.cursor()
c.execute(
"""SELECT f_table_name, f_geometry_column, srid, spatial_index_enabled
FROM geometry_columns"""
)
for row in c.fetchall():
if row[3] != 1:
print(
"Column {column} in table {table} has no spatial index; datasette-geo will ignore it.".format(
column=row[1], table=row[0]
)
)
continue
spatial_tables[row[0]] = row[1]
return spatial_tables
def get_bounds(conn, spatial_tables):
c = conn.cursor()
res = {}
for table, column in spatial_tables.items():
c.execute(
"SELECT AsText(Envelope(GUnion({column}))) FROM {table}".format(
table=table, column=column
)
)
data = c.fetchone()[0]
if data is None:
continue
bbox = wkt.loads(data)
res[table] = bbox.bounds
return res
| [((144, 167), 'datasette.utils.detect_spatialite', 'detect_spatialite', (['conn'], {}), '(conn)\n', (161, 167), False, 'from datasette.utils import detect_spatialite\n'), ((1106, 1121), 'shapely.wkt.loads', 'wkt.loads', (['data'], {}), '(data)\n', (1115, 1121), False, 'from shapely import wkt\n')] |
Sinon/microcosm-pubsub | microcosm_pubsub/context.py | c98a188fcd5b3f358c7171dae0c39a33c5774a4e | """
Message context.
"""
from typing import Dict
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger
from microcosm_pubsub.constants import TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage
@defaults(
enable_ttl=typed(boolean, default_value=True),
initial_ttl=typed(int, default_value=32),
)
@logger
class SQSMessageContext:
"""
Factory for per-message contexts.
"""
def __init__(self, graph):
self.enable_ttl = graph.config.sqs_message_context.enable_ttl
self.initial_ttl = graph.config.sqs_message_context.initial_ttl
def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]:
"""
Create a new context from a message.
"""
return self.from_sqs_message(context, **kwargs)
def from_sqs_message(self, message: SQSMessage, **kwargs):
context: Dict = dict(message.opaque_data)
context.update(
# include the message id
message_id=message.message_id,
**kwargs,
)
# include the TTL (if enabled)
if self.enable_ttl:
ttl = message.ttl if message.ttl is not None else self.initial_ttl
context[TTL_KEY] = str(ttl - 1)
# include the URI (if there is one)
if message.uri:
context[URI_KEY] = message.uri
return context
| [((317, 351), 'microcosm.api.typed', 'typed', (['boolean'], {'default_value': '(True)'}), '(boolean, default_value=True)\n', (322, 351), False, 'from microcosm.api import defaults, typed\n'), ((369, 397), 'microcosm.api.typed', 'typed', (['int'], {'default_value': '(32)'}), '(int, default_value=32)\n', (374, 397), False, 'from microcosm.api import defaults, typed\n')] |
murdockcrc/python-tricks | azure_ml/pytorch_classifier/train_parameterized.py | 57f7ad9c00a045c1f9f18f89bed6e73be6c85b69 | import os
import argparse
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model import Net
from azureml.core import Run
run = Run.get_context()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
help='Path to the training data'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='Learning rate for SGD'
)
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for SGD'
)
args = parser.parse_args()
print("===== DATA =====")
print("DATA PATH: " + args.data_path)
print("LIST FILES IN DATA PATH...")
print(os.listdir(args.data_path))
print("================")
# prepare DataLoader for CIFAR10 data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(
root=args.data_path,
train=True,
download=False,
transform=transform,
)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2
)
# define convolutional network
net = Net()
# set up pytorch loss / optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(
net.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
)
# train the network
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# unpack the data
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999:
loss = running_loss / 2000
run.log('loss', loss) # log loss metric to AML
print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
running_loss = 0.0
print('Finished Training') | [((189, 206), 'azureml.core.Run.get_context', 'Run.get_context', ([], {}), '()\n', (204, 206), False, 'from azureml.core import Run\n'), ((246, 271), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (269, 271), False, 'import argparse\n'), ((1000, 1103), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'args.data_path', 'train': '(True)', 'download': '(False)', 'transform': 'transform'}), '(root=args.data_path, train=True, download=\n False, transform=transform)\n', (1028, 1103), False, 'import torchvision\n'), ((1144, 1229), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=4, shuffle=True, num_workers=2\n )\n', (1171, 1229), False, 'import torch\n'), ((1295, 1300), 'model.Net', 'Net', ([], {}), '()\n', (1298, 1300), False, 'from model import Net\n'), ((1353, 1380), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1378, 1380), False, 'import torch\n'), ((760, 786), 'os.listdir', 'os.listdir', (['args.data_path'], {}), '(args.data_path)\n', (770, 786), False, 'import os\n'), ((898, 919), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (917, 919), True, 'import torchvision.transforms as transforms\n'), ((927, 981), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (947, 981), True, 'import torchvision.transforms as transforms\n')] |
kazqvaizer/arq-sqlalchemy-boilerplate | src/tests/test_app_db.py | c14596ed358a061e6eb2a380f4bd962242b123f3 | import pytest
from app.db import session_scope
pytestmark = pytest.mark.asyncio
async def test_engine_configured(env):
async with session_scope() as session:
assert str(session.bind.engine.url) == env("SQLALCHEMY_DATABASE_URI")
| [((138, 153), 'app.db.session_scope', 'session_scope', ([], {}), '()\n', (151, 153), False, 'from app.db import session_scope\n')] |
cgarciae/catalyst | catalyst/core/callbacks/formatters.py | 391ff89ab0d9a1961b88719e894f917ac0fb7fc3 | from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
from catalyst import utils
from catalyst.core import _State
class MetricsFormatter(ABC, logging.Formatter):
"""
Abstract metrics formatter
"""
def __init__(self, message_prefix):
"""
Args:
message_prefix: logging format string
that will be prepended to message
"""
super().__init__(f"{message_prefix}{{message}}", style="{")
@abstractmethod
def _format_message(self, state: _State):
pass
def format(self, record: logging.LogRecord):
"""
Format message string
"""
# noinspection PyUnresolvedReferences
state = record.state
record.msg = self._format_message(state)
return super().format(record)
class TxtMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in human-readable format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``TxtMetricsFormatter``
"""
super().__init__("[{asctime}] ")
def _format_metrics(self, metrics):
# metrics : dict[str: dict[str: float]]
metrics_formatted = {}
for key, value in metrics.items():
metrics_formatted_ = [
utils.format_metric(m_name, m_value)
for m_name, m_value in sorted(value.items())
]
metrics_formatted_ = " | ".join(metrics_formatted_)
metrics_formatted[key] = metrics_formatted_
return metrics_formatted
def _format_message(self, state: _State):
message = [""]
metrics = self._format_metrics(state.metric_manager.epoch_values)
for key, value in metrics.items():
message.append(
f"{state.stage_epoch_log}/{state.num_epochs} "
f"* Epoch {state.epoch_log} ({key}): {value}"
)
message = "\n".join(message)
return message
class JsonMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in json format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``JsonMetricsFormatter``
"""
super().__init__("")
def _format_message(self, state: _State):
res = dict(
metirics=state.metric_manager.epoch_values.copy(),
epoch=state.epoch,
time=datetime.now().isoformat()
)
return json.dumps(res, indent=True, ensure_ascii=False)
__all__ = ["MetricsFormatter", "TxtMetricsFormatter", "JsonMetricsFormatter"]
| [((2934, 2982), 'json.dumps', 'json.dumps', (['res'], {'indent': '(True)', 'ensure_ascii': '(False)'}), '(res, indent=True, ensure_ascii=False)\n', (2944, 2982), False, 'import json\n'), ((1552, 1588), 'catalyst.utils.format_metric', 'utils.format_metric', (['m_name', 'm_value'], {}), '(m_name, m_value)\n', (1571, 1588), False, 'from catalyst import utils\n'), ((2882, 2896), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2894, 2896), False, 'from datetime import datetime\n')] |
Suraj1127/fuzzy-matcher | fuzzy/fuzzy.py | a3a6ecc6954d79ca65e2517f93db44cc432e7a90 | #!/usr/bin/env python3
"""
Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching.
"""
import pip
def import_or_install(package):
try:
__import__(package)
except ImportError:
pip.main(['install', package])
import os
import sys
import argparse
import_or_install('numpy')
import_or_install('pandas')
import_or_install('fuzzywuzzy')
import numpy as np
import pandas as pd
from fuzzywuzzy import process, fuzz
class FuzzyMatcher:
"""
FuzzyMatcher class to perform the fuzzy matching.
"""
def __init__(self, df_1, df_2, columns_1, columns_2, append_in='second'):
"""
The constructor takes five arguments. The last argument 'append_in' is optional.
Parameters:
df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table
df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table
columns_1: list of common columns in the first table
columns_2: list of common columns in the second table
append_in (optional):
'first' if the common columns are to be appended in the first table
'second' if the common columns are to be appended in the second table
"""
if type(df_1) == str:
df_1 = pd.read_csv(df_1)
if type(df_2) == str:
df_2 = pd.read_csv(df_2)
df_1.columns = df_1.columns.str.lower().str.strip()
df_2.columns = df_2.columns.str.lower().str.strip()
columns_1 = [i.lower().strip() for i in columns_1]
columns_2 = [i.lower().strip() for i in columns_2]
if append_in == 'first':
temp = df_1
df_1 = df_2
df_2 = temp
temp = columns_1
columns_1 = columns_2
columns_2 = temp
self.df_1 = df_1.rename(columns=dict(zip(columns_1, columns_2)))
self.columns = columns_2
self.df_2 = self._fuzzy_match(self.df_1, df_2, self.columns[0])
@staticmethod
def _string_matching(name, collection, mapping_):
"""
Returns similar name using fuzzy matching.
"""
if name in collection:
return name
if name in mapping_:
return mapping_[name]
similar = process.extractOne(name, collection, scorer=fuzz.ratio)[0]
mapping_[name] = similar
return similar
def _fuzzy_match(self, df_1_t, df_2_t, common_column_t):
"""
Returns dataframe with the common column appended.
Notice that the appended columns end with '_t'.
"""
collection = set(df_1_t[common_column_t])
mapping_ = {}
df_2_t[common_column_t + '_t'] = df_2_t[common_column_t].apply(self._string_matching, args=(collection, mapping_))
return df_2_t
@property
def fuzzy_match(self):
"""
Returns the dataframe consisting of all the appended columns.
"""
for i_t, common_column in enumerate(self.columns[1:], start=1):
self.df_2[common_column + '_t'] = np.nan
group_1 = self.df_1.groupby(self.columns[:i_t])
group_2 = self.df_2.groupby([i + '_t' for i in self.columns[:i_t]])
for key, df_slice_2 in group_2:
df_slice_1 = group_1.get_group(key)
df_slice_2 = self._fuzzy_match(df_slice_1, df_slice_2, common_column)
self.df_2.loc[df_slice_2.index, common_column + '_t'] = df_slice_2.loc[:, common_column + '_t']
return self.df_2
def save(self, filename):
"""
Saves the result dataframe to a CSV file, filename.
"""
self.df_2.to_csv(filename)
def parse_args(parser):
"""
Parsing and configuration of the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.')
parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.')
parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.')
parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.')
parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.')
parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ')
return check_args(parser.parse_args())
def check_args(args):
"""
Checking the arguments if they are entered properly.
Validations performed:
1. Compulsory arguments are entered.
2. The entered filenames are present in the current folder.
3. The entered column names are present in the corresponding files.
4. If the destination filename is already present in the directory, ask the user if it can be overwritten.
"""
# for --firstcsv and --secondcsv
for filename in [args.firstcsv, args.secondcsv]:
if not os.path.isfile(filename):
raise Exception("File {} is not present in the currrent folder.".format(filename))
# --commoncolumns1
commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')]
temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp))
# --commoncolumns2
commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')]
temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp))
# --destination
if os.path.isfile(args.destination):
print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination))
ans = input().strip().lower()
if ans == 'n':
print("Please enter different destination filename and run the script again.")
sys.exit()
return args
if __name__ == "__main__":
# instantiate the ArgumentParser class and parse the arguments
parser = argparse.ArgumentParser()
arguments = parse_args(parser)
# save the arguments as some variables which later would be passed to FuzzyMatcher class
filename_1 = arguments.firstcsv
filename_2 = arguments.secondcsv
result_filename = arguments.destination
# clean and lowercase-ize the columns names
common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')]
common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')]
# instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file
fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in)
fuzzy_matcher.fuzzy_match
fuzzy_matcher.save(result_filename)
| [((4202, 4227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4225, 4227), False, 'import argparse\n'), ((6315, 6347), 'os.path.isfile', 'os.path.isfile', (['args.destination'], {}), '(args.destination)\n', (6329, 6347), False, 'import os\n'), ((6779, 6804), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6802, 6804), False, 'import argparse\n'), ((255, 285), 'pip.main', 'pip.main', (["['install', package]"], {}), "(['install', package])\n", (263, 285), False, 'import pip\n'), ((1402, 1419), 'pandas.read_csv', 'pd.read_csv', (['df_1'], {}), '(df_1)\n', (1413, 1419), True, 'import pandas as pd\n'), ((1482, 1499), 'pandas.read_csv', 'pd.read_csv', (['df_2'], {}), '(df_2)\n', (1493, 1499), True, 'import pandas as pd\n'), ((2509, 2564), 'fuzzywuzzy.process.extractOne', 'process.extractOne', (['name', 'collection'], {'scorer': 'fuzz.ratio'}), '(name, collection, scorer=fuzz.ratio)\n', (2527, 2564), False, 'from fuzzywuzzy import process, fuzz\n'), ((5480, 5504), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5494, 5504), False, 'import os\n'), ((6625, 6635), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6633, 6635), False, 'import sys\n'), ((5743, 5778), 'pandas.read_csv', 'pd.read_csv', (['args.firstcsv'], {'nrows': '(1)'}), '(args.firstcsv, nrows=1)\n', (5754, 5778), True, 'import pandas as pd\n'), ((6085, 6121), 'pandas.read_csv', 'pd.read_csv', (['args.secondcsv'], {'nrows': '(1)'}), '(args.secondcsv, nrows=1)\n', (6096, 6121), True, 'import pandas as pd\n')] |
zkbt/exopop | exoatlas/visualizations/panels/BubblePanel.py | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | from .Panel import *
__all__ = ['BubblePanel']
default_size = plt.matplotlib.rcParams['lines.markersize']**2
class BubblePanel(Panel):
'''
BubblePanel is a general wrapper for making scatter plots
where planets are represented as bubbles that can have
informative sizes and/or colors.
'''
def __init__(self,
xaxis=None,
yaxis=None,
size=None, size_normalization=None,
color=None, cmap='plasma', vmin=None, vmax=None, color_normalization=None,
**kw):
'''
Initialize a plotting panel.
Parameters
----------
size : PlottableAxis, str, float, None
What should the sizes of points be or encode?
size_normalization : float
If sizes depend on quantities,
how should they be normalized?
color : PlottableAxis, str, float, None
What should the colors of points be or encode?
cmap : str, cmap from plt.matplotlib.cm
If the colors depend on quantities,
what cmap should be used for them?
vmin : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the bottom of the cmap be?
vmax : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the top of the cmap be?
color_normalization : matplotlib.colors.Normalize
If color depend on quantities, how should
the values be normalized. If color_normalization
is defined, any values provided here for
vmin and vmax will be ignored.
**kw : dict
Other keywords will be passed on to *all*
Panel/Plottable initializations (which may
include x, y, size, and color). If you need
more fine-grained control over which axis
gets which keyword, consider initializing
those panels one-by-one.
'''
# initialize the basics of the panel with the plottable axes
Panel.__init__(self, xaxis=xaxis, yaxis=yaxis, **kw)
# set up how we should scale the sizes of points
size = clean_axis(size)
try:
# try to make a variable size axis
self.plottable['size'] = size(panel=self, **kw)
default_size_normalization = self.plottable['size'].size_normalization
except TypeError:
# otherwise, use a single size for all points
self.plottable['size'] = size
default_size_normalization = 1
#self.plottable['x'].panel = self
#self.plottable['y'].panel = self
# make sure a size normalization has been defined
self.size_normalization = size_normalization or default_size_normalization
# set up how we should set the colors of points
color = clean_axis(color)
try:
# try to make a variable color axis
self.plottable['color'] = color(panel=self, **kw)
default_lim = self.plottable['color'].lim
except TypeError:
# otherwise, use a single color for all points
self.plottable['color'] = color
default_lim = [None, None]
# if an actual cmap was provided, use it
if isinstance(cmap, plt.matplotlib.colors.Colormap):
self.cmap = cmap
# otherwise, treat the cmap as a string key
else:
self.cmap = plt.matplotlib.cm.cmap_d[cmap]
# make sure the color map limits are set
self.vmin = vmin or default_lim[0]
self.vmax = vmax or default_lim[1]
# if a custom normalization is used, reset vmin + vmax
self.color_normalization = color_normalization
if isinstance(self.color_normalization,
plt.matplotlib.colors.Normalize):
# pull the normalization's min/max for information
self.vmin = color_normalization.vmin
self.vmax = color_normalization.vmax
# apply (x,y) axis labels, scales, limits appropriately
for axis in 'xy':
for attribute in ['label', 'scale', 'lim']:
setattr(self,
f'{axis}{attribute}',
getattr(self.plottable[axis],
attribute))
#DEBUG
self.summarize()
def get_sizes(self):
'''
The sizes of the bubbles.
Returns
-------
s : an input for plt.scatter
Either a single scalar, or an array with variable
sizes for each bubble according to some quantity.
'''
# should we ignore any variable size instructions?
if self.pop.respond_to_size == False:
size = self.pop.plotkw.get('s', None)
# if desired, set variable sizes
elif isinstance(self.plottable['size'], PlottableAxis):
# get the raw values for the sizes
x = self.plottable['size'].value()
# calculate the normalized size
size = default_size*x/self.size_normalization
# otherwise, set a single size
else:
# get default, first from pop and then from panel
size = self.pop.plotkw.get('s', self.plottable['size'])
# return a valid input to plt.scatter(s=...)
return size
def get_colors(self):
'''
The colors of the bubbles.
Returns
-------
c : an input for plt.scatter
Either a single color, or an array with variable
colors for each bubble according to some quantity.
'''
# should we ignore any variable color instructions?
if self.pop.respond_to_color == False:
color = self.pop.color
# should we use a variable color?
elif isinstance(self.plottable['color'], PlottableAxis):
# get the raw values to go into the color
x = self.plottable['color'].value()
# FIXME - make sure to check vmin/vmax are valid
#if (self.vmin is None) or (self.vmax is None):
# raise AtlasError(f'''
# It looks like you're trying to use
# {self.plottable['color']} to set variable
# colors for bubbles. To do so, please make
# sure it has finite values defined for its
# .vmin and .vmax attributes.
# ''')
# make sure we have *some* normalizer defined
f = plt.matplotlib.colors.Normalize
self.color_normalization = (self.color_normalization
or f(vmin=self.vmin, vmax=self.vmax))
normalized = self.color_normalization(x)
color = self.cmap(normalized)
# finally, should we just use a default color?
else:
# get default, first from pop and then from panel
color = self.pop.color
if color is None:
color = self.plottable['color']
# return a valid input to any one of the following:
# plt.scatter(c=...)
# plt.scatter(edgecolors=...)
# plt.scatter(facecolors=...)
return color
def kw(self, key=None, **kwargs):
'''
Do a little decision-making about the plotting keyword
arguments, pulling defaults from each population where
needed.
Parameter
---------
key : str
The population for which we should pull keywords.
If None, go with the current population.
**kwargs : dict
All other keywords will be directed toward
overwriting individual population defaults.
'''
# identify the population we're working with
if key is None:
key = self.key
#else:
self.point_at(key)
# define some default keywords, which can be over-written
default = dict(s=self.get_sizes(),
marker=self.pop.marker,
linewidth=self.pop.linewidth,
alpha=self.pop.alpha,
zorder=self.pop.zorder,
label=self.pop.label)
# sort out whether faces and/or edges should get color
c=self.get_colors()
if self.pop.filled:
default['facecolors'] = c
else:
default['facecolors'] = 'none'
if self.pop.outlined:
default['edgecolors'] = c
else:
default['edgecolors'] = 'none'
# if any other keywords are provided, overwrite these defaults
for k, v in kwargs.items():
default[k] = v
return default
def plot(self, key, ax=None, labelkw={}, **kwargs):
'''
Add the points for a particular population to this panel.
Parameters
----------
key : str
The population (as an item in the self.pops dictionary) to add.
ax :
Into what ax should we place this plot?
If None, use default.
labelkw : dict
Keywords for labeling the planet names.
**kwargs : dict
Any extra keywords will be passed on to `scatter`
'''
# focus attention on that population
self.point_at(key)
# make sure we're plotting into the appropriate axes
try:
plt.sca(self.ax)
except AttributeError:
self.setup(ax=ax)
# add the scattered points
self.scattered[key] = self.ax.scatter(self.x, self.y, **self.kw(key,**kwargs))
# set the scales, limits, labels
self.finish_plot(labelkw=labelkw)
| [] |
pengwu/scapy_env | venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py | 3db9c5dea2e219048a2387649d6d89be342903d9 | import os
from twisted.internet.defer import succeed
class Load(object):
def register(self, sysinfo):
self._sysinfo = sysinfo
def run(self):
self._sysinfo.add_header("System load", str(os.getloadavg()[0]))
return succeed(None)
| [((250, 263), 'twisted.internet.defer.succeed', 'succeed', (['None'], {}), '(None)\n', (257, 263), False, 'from twisted.internet.defer import succeed\n'), ((214, 229), 'os.getloadavg', 'os.getloadavg', ([], {}), '()\n', (227, 229), False, 'import os\n')] |
dougmorato/bag-of-holding | src/boh_api/viewsets.py | 8a7bc45ced8837bdb00da60dcfb496bb0271f161 | from rest_framework import viewsets
from boh import models
from . import serializers
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
class ApplicationViewSet(viewsets.ModelViewSet):
queryset = models.Application.objects.all()
serializer_class = serializers.ApplicationSerializer
class TagViewSet(viewsets.ModelViewSet):
queryset = models.Tag.objects.all()
serializer_class = serializers.TagSerializer
class PersonViewSet(viewsets.ModelViewSet):
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
| [((154, 187), 'boh.models.Organization.objects.all', 'models.Organization.objects.all', ([], {}), '()\n', (185, 187), False, 'from boh import models\n'), ((312, 344), 'boh.models.Application.objects.all', 'models.Application.objects.all', ([], {}), '()\n', (342, 344), False, 'from boh import models\n'), ((460, 484), 'boh.models.Tag.objects.all', 'models.Tag.objects.all', ([], {}), '()\n', (482, 484), False, 'from boh import models\n'), ((595, 622), 'boh.models.Person.objects.all', 'models.Person.objects.all', ([], {}), '()\n', (620, 622), False, 'from boh import models\n')] |
TatendaNoreen/Python | githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py | df9799bbea84af03c1fb3b29fada1e16c04bab80 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
#create environment in which agents will operate
environment=[]
#read csv downloaded file
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist=[] # A list of rows
environment.append(rowlist)
for value in row: # A list of value
#print(value) # Floats
rowlist.append(value)
f.close() # Don't close until you are done with the reader;
# the data is read on request.
#def distance_between(agents_row_a, agents_row_b):
# return (((agents_row_a.x - agents_row_b.x)**2) +
# ((agents_row_a.y - agents_row_b.y)**2))**0.5
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# Make the agents and connecting with the environment.
agents = []
def update(frame_number):
fig.clear()
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment,agents))
# Move and eat agents with every move or iteration.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
# Loop through the agents in self.agents .
# Calculate the distance between self and the current other agent:
# distance = self.distance_between(agent)
# If distance is less than or equal to the neighbourhood
# Sum self.store and agent.store .
# Divide sum by two to calculate average.
# self.store = average
# agent.store = average
# End if
# End loop
# plot
matplotlib.pyplot.xlim(0, 299)
matplotlib.pyplot.ylim(0, 299)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
matplotlib.pyplot.imshow(environment)
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1)
matplotlib.pyplot.show()
| [((18, 41), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (32, 41), False, 'import matplotlib\n'), ((265, 308), 'csv.reader', 'csv.reader', (['f'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(f, quoting=csv.QUOTE_NONNUMERIC)\n', (275, 308), False, 'import csv\n'), ((837, 877), 'matplotlib.pyplot.figure', 'matplotlib.pyplot.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (861, 877), False, 'import matplotlib\n'), ((2119, 2178), 'matplotlib.animation.FuncAnimation', 'matplotlib.animation.FuncAnimation', (['fig', 'update'], {'interval': '(1)'}), '(fig, update, interval=1)\n', (2153, 2178), False, 'import matplotlib\n'), ((2179, 2203), 'matplotlib.pyplot.show', 'matplotlib.pyplot.show', ([], {}), '()\n', (2201, 2203), False, 'import matplotlib\n'), ((1892, 1922), 'matplotlib.pyplot.xlim', 'matplotlib.pyplot.xlim', (['(0)', '(299)'], {}), '(0, 299)\n', (1914, 1922), False, 'import matplotlib\n'), ((1927, 1957), 'matplotlib.pyplot.ylim', 'matplotlib.pyplot.ylim', (['(0)', '(299)'], {}), '(0, 299)\n', (1949, 1957), False, 'import matplotlib\n'), ((2060, 2097), 'matplotlib.pyplot.imshow', 'matplotlib.pyplot.imshow', (['environment'], {}), '(environment)\n', (2084, 2097), False, 'import matplotlib\n'), ((2001, 2052), 'matplotlib.pyplot.scatter', 'matplotlib.pyplot.scatter', (['agents[i].x', 'agents[i].y'], {}), '(agents[i].x, agents[i].y)\n', (2026, 2052), False, 'import matplotlib\n'), ((1091, 1132), 'agentframework.Agent', 'agentframework.Agent', (['environment', 'agents'], {}), '(environment, agents)\n', (1111, 1132), False, 'import agentframework\n')] |
opencv/openvino_training_extensions | external/model-preparation-algorithm/tests/conftest.py | f5d809741e192a2345558efc75899a475019cf98 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
try:
import e2e.fixtures
from e2e.conftest_utils import * # noqa
from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
from e2e import config # noqa
from e2e.utils import get_plugins_from_packages
pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
_e2e_pytest_addoption = None
pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
@pytest.fixture
def ote_test_domain_fx():
return 'model-preparation-algorithm'
@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
assert isinstance(current_test_parameters_fx, dict)
if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT:
return 'performance'
else:
return 'integration'
@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/configs/'
logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
return root
@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/tests/reference/'
logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
return root
# pytest magic
def pytest_generate_tests(metafunc):
ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
ote_pytest_addoption_insertion(parser)
| [((1154, 1185), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1168, 1185), False, 'import pytest\n'), ((1496, 1527), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1510, 1527), False, 'import pytest\n'), ((356, 388), 'e2e.utils.get_plugins_from_packages', 'get_plugins_from_packages', (['[e2e]'], {}), '([e2e])\n', (381, 388), False, 'from e2e.utils import get_plugins_from_packages\n'), ((1281, 1308), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1298, 1308), False, 'import logging\n'), ((1623, 1650), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1640, 1650), False, 'import logging\n'), ((1345, 1367), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (1357, 1367), True, 'import os.path as osp\n'), ((1687, 1709), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (1699, 1709), True, 'import os.path as osp\n')] |
rtpsw/ibis | ibis/udf/validate.py | d7318fdf87121cd8fadbcf0369a2b217aab3053a | """Validation for UDFs.
Warning: This is an experimental module and API here can change without notice.
DO NOT USE DIRECTLY.
"""
from inspect import Parameter, Signature, signature
from typing import Any, Callable, List
import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType
def _parameter_count(funcsig: Signature) -> int:
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
)
def validate_input_type(
input_type: List[DataType], func: Callable
) -> Signature:
"""Check that the declared number of inputs (the length of `input_type`)
and the number of inputs to `func` are equal.
If the signature of `func` uses *args, then no check is done (since no
check can be done).
Parameters
----------
input_type : List[DataType]
func : callable
Returns
-------
inspect.Signature
"""
funcsig = signature(func)
params = funcsig.parameters.values()
# We can only do validation if all the positional arguments are explicit
# (i.e. no *args)
if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
declared_parameter_count = len(input_type)
function_parameter_count = _parameter_count(funcsig)
if declared_parameter_count != function_parameter_count:
raise TypeError(
'Function signature {!r} has {:d} parameters, '
'input_type has {:d}. These must match. Non-column '
'parameters must be defined as keyword only, i.e., '
'def foo(col, *, function_param).'.format(
func.__name__,
function_parameter_count,
declared_parameter_count,
)
)
return funcsig
def validate_output_type(output_type: Any) -> None:
"""Check that the output type is a single datatype."""
if isinstance(output_type, list):
raise com.IbisTypeError(
'The output type of a UDF must be a single datatype.'
)
| [((1277, 1292), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (1286, 1292), False, 'from inspect import Parameter, Signature, signature\n'), ((2323, 2395), 'ibis.common.exceptions.IbisTypeError', 'com.IbisTypeError', (['"""The output type of a UDF must be a single datatype."""'], {}), "('The output type of a UDF must be a single datatype.')\n", (2340, 2395), True, 'import ibis.common.exceptions as com\n')] |
stattikcms/stattik | packages/stattik/stattik/schema/schema.py | 5c96d600d105461edb95a11d8050dee3c32edd1e | import inspect
from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType
from .resolver import *
#
# Schema
#
class GrammarError(Exception):
pass
keywords = ['query', 'mutation', 'subscription', 'source']
class SchemaMetaDict(dict):
'''
Dictionary that allows decorated schema entry functions to be overloaded
'''
def __setitem__(self, key, value):
if key in self and callable(value) and hasattr(value, 'name'):
value.next_func = self[key]
if not hasattr(value.next_func, 'name'):
raise GrammarError(f'Redefinition of {key}. Perhaps an earlier {key} is missing @_')
super().__setitem__(key, value)
def __getitem__(self, key):
#if key not in self and key.isupper() and key[:1] != '_':
if key not in self and key.isupper() and not key[:1] in keywords:
return key.upper()
else:
return super().__getitem__(key)
def _query_decorator(name):
def decorate(func):
func.tag = 'query'
func.name = name
return func
return decorate
def _mutation_decorator(name):
def decorate(func):
func.tag = 'mutation'
func.name = name
return func
return decorate
def _subscription_decorator(name):
def decorate(func):
func.tag = 'subscription'
func.name = name
return func
return decorate
def _source_decorator(name):
def decorate(func):
func.tag = 'source'
func.name = name
return func
return decorate
class SchemaMeta(type):
@classmethod
def __prepare__(meta, *args, **kwargs):
d = SchemaMetaDict()
d['query'] = _query_decorator
d['mutation'] = _mutation_decorator
d['subscription'] = _subscription_decorator
d['source'] = _source_decorator
return d
def __new__(meta, selfname, bases, attributes):
#del attributes['_']
for key in keywords:
del attributes[key]
self = super().__new__(meta, selfname, bases, attributes)
self._build(list(attributes.items()))
return self
class Schema(metaclass=SchemaMeta):
def __init__(self, parent=None):
self.parent = parent
self.children = []
if parent:
parent.add_child(self)
self.db = parent.db
else:
self.db = self
self.entries = self.__class__.entries
@classmethod
def produce(self, parent=None):
schema = self(parent)
return schema
def add_child(self, schema):
self.children.append(schema)
def get_gql(self):
gql = [inspect.getdoc(self)]
for child in self.children:
gql.append(child.get_gql())
return "\n".join(gql)
def register(self):
for entry in self.entries:
entry.register(self)
for child in self.children:
child.register()
def add(self, r):
self.entries.append(r)
@classmethod
def __collect_functions(self, definitions):
'''
Collect all of the tagged grammar entries
'''
entries = [ (name, value) for name, value in definitions
if callable(value) and hasattr(value, 'name') ]
return entries
@classmethod
def _build(self, definitions):
if vars(self).get('_build', False):
return
# Collect all of the entry functions from the class definition
functions = self.__collect_functions(definitions)
self.entries = self.__build_entries(functions)
@classmethod
def __build_entries(self, functions):
entries = []
errors = ''
for name, func in functions:
entry = self._build_entry(func)
entries.append(entry)
return entries
@classmethod
def _build_entry(self, func):
tag = func.tag
name = func.name
prodname = func.__name__
unwrapped = inspect.unwrap(func)
filename = unwrapped.__code__.co_filename
lineno = unwrapped.__code__.co_firstlineno
logger.debug(f"_build_entry:tag: {tag}")
logger.debug(f"_build_entry:name: {name}")
logger.debug(f"_build_entry:prodname: {prodname}")
logger.debug(f"_build_entry:unwrapped: {unwrapped}")
#entry = Resolver(name, func, prodname=prodname, filename=filename, lineno=lineno)
entry = entry_factories[tag](self, name, func, prodname=prodname, filename=filename, lineno=lineno)
logger.debug(f"_build_entry:entry: {entry}")
return entry
# This is for testing or in case you don't want a database as the root schema
class RootSchema(Schema):
"""
type Query {
dummy: Int!
}
type Mutation {
setDummy(val: Int!): Int
}
type Subscription {
dummy: Int
}
"""
instance = None
def __init__(self, parent=None):
super().__init__(parent)
Schema.instance = self
self.query_type = QueryType()
self.mutation_type = MutationType()
self.subscription_type = SubscriptionType()
@classmethod
def produce(self):
if self.instance:
return self.instance
self.instance = schema = self()
return schema
def make_executable(self):
self.register()
#return make_executable_schema(type_defs, self.query)
return make_executable_schema(
self.get_gql(),
self.query_type,
self.mutation_type,
self.subscription_type
) | [((4025, 4045), 'inspect.unwrap', 'inspect.unwrap', (['func'], {}), '(func)\n', (4039, 4045), False, 'import inspect\n'), ((5069, 5080), 'ariadne.QueryType', 'QueryType', ([], {}), '()\n', (5078, 5080), False, 'from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType\n'), ((5110, 5124), 'ariadne.MutationType', 'MutationType', ([], {}), '()\n', (5122, 5124), False, 'from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType\n'), ((5158, 5176), 'ariadne.SubscriptionType', 'SubscriptionType', ([], {}), '()\n', (5174, 5176), False, 'from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType\n'), ((2697, 2717), 'inspect.getdoc', 'inspect.getdoc', (['self'], {}), '(self)\n', (2711, 2717), False, 'import inspect\n')] |
DankMickey/Project-Altis-Educational-Source | toontown/battle/DistributedBattleBaseAI.py | 0a74999fb52d4e690a41b984703119f63c372d20 | import random
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.battle.BattleBase import *
from toontown.battle.BattleCalculatorAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.battle.SuitBattleGlobals import *
from pandac.PandaModules import *
from toontown.battle import BattleExperienceAI
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.ai import DatabaseObject
from toontown.toon import DistributedToonAI
from toontown.toon import InventoryBase
from toontown.toonbase import ToontownGlobals
from toontown.toon import NPCToons
from otp.ai.MagicWordGlobal import *
from toontown.pets import DistributedPetProxyAI
class DistributedBattleBaseAI(DistributedObjectAI.DistributedObjectAI, BattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBaseAI')
    def __init__(self, air, zoneId, finishCallback = None, maxSuits = 4, bossBattle = 0, tutorialFlag = 0, interactivePropTrackBonus = -1):
        """Set up a server-side battle object.

        air: the AI repository this distributed object lives in.
        zoneId: zone the battle takes place in.
        finishCallback: callable invoked when the battle resolves (stored,
            invoked elsewhere in the class).
        maxSuits: maximum number of suits allowed in this battle.
        bossBattle: nonzero marks this as a boss battle.
        tutorialFlag: nonzero when the battle is part of the tutorial;
            forwarded to the BattleCalculatorAI.
        interactivePropTrackBonus: gag track boosted by a nearby
            interactive prop (-1 when none applies, per the default).
        """
        DistributedObjectAI.DistributedObjectAI.__init__(self, air)
        self.serialNum = 0
        self.zoneId = zoneId
        self.maxSuits = maxSuits
        self.setBossBattle(bossBattle)
        self.tutorialFlag = tutorialFlag
        self.interactivePropTrackBonus = interactivePropTrackBonus
        self.finishCallback = finishCallback
        # Per-avatar bookkeeping for join/adjust handshakes with clients.
        self.avatarExitEvents = []
        self.responses = {}
        self.adjustingResponses = {}
        self.joinResponses = {}
        self.adjustingSuits = []
        self.adjustingToons = []
        self.numSuitsEver = 0
        BattleBase.__init__(self)
        self.streetBattle = 1
        self.pos = Point3(0, 0, 0)
        self.initialSuitPos = Point3(0, 0, 0)
        # Per-toon reward/progress tracking accumulated over the battle.
        self.toonExp = {}
        self.toonOrigQuests = {}
        self.toonItems = {}
        self.toonOrigMerits = {}
        self.toonMerits = {}
        self.toonParts = {}
        self.battleCalc = BattleCalculatorAI(self, tutorialFlag)
        # Active invasions and the more-XP holiday both scale skill credit.
        if self.air.suitInvasionManager.getInvading():
            mult = getInvasionMultiplier()
            self.battleCalc.setSkillCreditMultiplier(mult)
        if self.air.holidayManager.isMoreXpHolidayRunning():
            mult = getMoreXpHolidayMultiplier()
            self.battleCalc.setSkillCreditMultiplier(mult)
        # fsm is created below; None here so clearAttacks & co. are safe first.
        self.fsm = None
        self.clearAttacks()
        self.ignoreFaceOffDone = 0
        self.needAdjust = 0
        # Movie lifecycle flags: built, played, reward shown, requested.
        self.movieHasBeenMade = 0
        self.movieHasPlayed = 0
        self.rewardHasPlayed = 0
        self.movieRequested = 0
        self.ignoreResponses = 0
        self.ignoreAdjustingResponses = 0
        self.taskNames = []
        self.exitedToons = []
        # Kill statistics, kept at several granularities for reward credit.
        self.suitsKilled = []
        self.suitsKilledThisBattle = []
        self.suitsKilledPerFloor = []
        self.suitsEncountered = []
        self.newToons = []
        self.newSuits = []
        self.numNPCAttacks = 0
        self.npcAttacks = {}
        self.pets = {}
        self.fireCount = 0
        # Main battle state machine: face-off -> join/input -> movie -> reward.
        self.fsm = ClassicFSM.ClassicFSM('DistributedBattleAI', [State.State('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput', 'Resume']),
         State.State('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, ['WaitForInput', 'Resume']),
         State.State('WaitForInput', self.enterWaitForInput, self.exitWaitForInput, ['MakeMovie', 'Resume']),
         State.State('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, ['PlayMovie', 'Resume']),
         State.State('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, ['WaitForJoin', 'Reward', 'Resume']),
         State.State('Reward', self.enterReward, self.exitReward, ['Resume']),
         State.State('Resume', self.enterResume, self.exitResume, []),
         State.State('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])], 'Off', 'Off')
        # Auxiliary two-state machines gating joining, running and adjusting.
        self.joinableFsm = ClassicFSM.ClassicFSM('Joinable', [State.State('Joinable', self.enterJoinable, self.exitJoinable, ['Unjoinable']), State.State('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, ['Joinable'])], 'Unjoinable', 'Unjoinable')
        self.joinableFsm.enterInitialState()
        self.runableFsm = ClassicFSM.ClassicFSM('Runable', [State.State('Runable', self.enterRunable, self.exitRunable, ['Unrunable']), State.State('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])], 'Unrunable', 'Unrunable')
        self.runableFsm.enterInitialState()
        self.adjustFsm = ClassicFSM.ClassicFSM('Adjust', [State.State('Adjusting', self.enterAdjusting, self.exitAdjusting, ['NotAdjusting', 'Adjusting']), State.State('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting, ['Adjusting'])], 'NotAdjusting', 'NotAdjusting')
        self.adjustFsm.enterInitialState()
        self.fsm.enterInitialState()
        self.startTime = globalClock.getRealTime()
        self.adjustingTimer = Timer()
def clearAttacks(self):
self.toonAttacks = {}
self.suitAttacks = getDefaultSuitAttacks()
def requestDelete(self):
if hasattr(self, 'fsm'):
self.fsm.request('Off')
self.__removeTaskName(self.uniqueName('make-movie'))
DistributedObjectAI.DistributedObjectAI.requestDelete(self)
    def delete(self):
        """Tear down the battle: stop FSMs/timers, release references, delete.

        Ordering matters here: FSM is turned off and tasks removed before
        members are deleted, and each timer is stopped before its reference
        is dropped.
        """
        self.notify.debug('deleting battle')
        self.fsm.request('Off')
        self.ignoreAll()
        self.__removeAllTasks()
        del self.fsm
        del self.joinableFsm
        del self.runableFsm
        del self.adjustFsm
        self.__cleanupJoinResponses()
        # Stop each timer before releasing it.
        self.timer.stop()
        del self.timer
        self.adjustingTimer.stop()
        del self.adjustingTimer
        self.battleCalc.cleanup()
        del self.battleCalc
        # Break the suit->trap references added while in battle.
        for suit in self.suits:
            del suit.battleTrap
        del self.finishCallback
        # Release any pet proxies created for this battle.
        for petProxy in self.pets.values():
            petProxy.requestDelete()
        DistributedObjectAI.DistributedObjectAI.delete(self)
def pause(self):
self.timer.stop()
self.adjustingTimer.stop()
def unpause(self):
self.timer.resume()
self.adjustingTimer.resume()
def abortBattle(self):
self.notify.debug('%s.abortBattle() called.' % self.doId)
toonsCopy = self.toons[:]
for toonId in toonsCopy:
self.__removeToon(toonId)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(toonId)
self.d_setMembers()
self.b_setState('Resume')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
def __removeSuit(self, suit):
self.notify.debug('__removeSuit(%d)' % suit.doId)
self.suits.remove(suit)
self.activeSuits.remove(suit)
if self.luredSuits.count(suit) == 1:
self.luredSuits.remove(suit)
self.suitGone = 1
del suit.battleTrap
def findSuit(self, id):
for s in self.suits:
if s.doId == id:
return s
return None
def __removeTaskName(self, name):
if self.taskNames.count(name):
self.taskNames.remove(name)
self.notify.debug('removeTaskName() - %s' % name)
taskMgr.remove(name)
def __removeAllTasks(self):
for n in self.taskNames:
self.notify.debug('removeAllTasks() - %s' % n)
taskMgr.remove(n)
self.taskNames = []
def __removeToonTasks(self, toonId):
name = self.taskName('running-toon-%d' % toonId)
self.__removeTaskName(name)
name = self.taskName('to-pending-av-%d' % toonId)
self.__removeTaskName(name)
def getLevelDoId(self):
return 0
def getBattleCellId(self):
return 0
def getPosition(self):
self.notify.debug('getPosition() - %s' % self.pos)
return [self.pos[0], self.pos[1], self.pos[2]]
def getInitialSuitPos(self):
p = []
p.append(self.initialSuitPos[0])
p.append(self.initialSuitPos[1])
p.append(self.initialSuitPos[2])
return p
def setBossBattle(self, bossBattle):
self.bossBattle = bossBattle
def getBossBattle(self):
return self.bossBattle
def b_setState(self, state):
self.notify.debug('network:setState(%s)' % state)
stime = globalClock.getRealTime() + SERVER_BUFFER_TIME
self.sendUpdate('setState', [state, globalClockDelta.localToNetworkTime(stime)])
self.setState(state)
def setState(self, state):
self.fsm.request(state)
def getState(self):
return [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()]
def d_setMembers(self):
self.notify.debug('network:setMembers()')
self.sendUpdate('setMembers', self.getMembers())
def getMembers(self):
suits = []
for s in self.suits:
suits.append(s.doId)
joiningSuits = ''
for s in self.joiningSuits:
joiningSuits += str(suits.index(s.doId))
pendingSuits = ''
for s in self.pendingSuits:
pendingSuits += str(suits.index(s.doId))
activeSuits = ''
for s in self.activeSuits:
activeSuits += str(suits.index(s.doId))
luredSuits = ''
for s in self.luredSuits:
luredSuits += str(suits.index(s.doId))
suitTraps = ''
for s in self.suits:
if s.battleTrap == NO_TRAP:
suitTraps += '9'
elif s.battleTrap == BattleCalculatorAI.TRAP_CONFLICT:
suitTraps += '9'
else:
suitTraps += str(s.battleTrap)
toons = []
for t in self.toons:
toons.append(t)
joiningToons = ''
for t in self.joiningToons:
joiningToons += str(toons.index(t))
pendingToons = ''
for t in self.pendingToons:
pendingToons += str(toons.index(t))
activeToons = ''
for t in self.activeToons:
activeToons += str(toons.index(t))
runningToons = ''
for t in self.runningToons:
runningToons += str(toons.index(t))
self.notify.debug('getMembers() - suits: %s joiningSuits: %s pendingSuits: %s activeSuits: %s luredSuits: %s suitTraps: %s toons: %s joiningToons: %s pendingToons: %s activeToons: %s runningToons: %s' % (suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons))
return [suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons,
globalClockDelta.getRealNetworkTime()]
    def d_adjust(self):
        """Tell clients to start a position adjustment, stamped with net time."""
        self.notify.debug('network:adjust()')
        self.sendUpdate('adjust', [globalClockDelta.getRealNetworkTime()])
def getInteractivePropTrackBonus(self):
return self.interactivePropTrackBonus
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.zoneId
    def d_setMovie(self):
        """Broadcast the round movie to clients and record cog encounters."""
        self.notify.debug('network:setMovie()')
        self.sendUpdate('setMovie', self.getMovie())
        self.__updateEncounteredCogs()
    def getMovie(self):
        """Build the round-movie broadcast list.

        Layout: [movieHasBeenMade, activeToons, suitIds], then one attack
        record per toon slot (always padded to 4 slots with -1 records),
        then one record per suit attack.
        """
        suitIds = []
        for s in self.activeSuits:
            suitIds.append(s.doId)
        p = [self.movieHasBeenMade]
        p.append(self.activeToons)
        p.append(suitIds)
        for t in self.activeToons:
            if t in self.toonAttacks:
                ta = self.toonAttacks[t]
                index = -1
                id = ta[TOON_ID_COL]
                if id != -1:
                    index = self.activeToons.index(id)
                track = ta[TOON_TRACK_COL]
                if (track == NO_ATTACK or attackAffectsGroup(track, ta[TOON_LVL_COL])) and track != NPCSOS and track != PETSOS:
                    target = -1
                    if track == HEAL:
                        # Level-1 group heal: roll the random HP-bonus seed here.
                        if ta[TOON_LVL_COL] == 1:
                            ta[TOON_HPBONUS_COL] = random.randint(0, 10000)
                elif track == SOS or track == NPCSOS or track == PETSOS:
                    target = ta[TOON_TGT_COL]
                elif track == HEAL:
                    # NOTE(review): this branch looks unreachable — a HEAL track
                    # is always consumed by the first condition above (single-
                    # target HEALs fail attackAffectsGroup?  confirm); kept as-is.
                    if self.activeToons.count(ta[TOON_TGT_COL]) != 0:
                        target = self.activeToons.index(ta[TOON_TGT_COL])
                    else:
                        target = -1
                elif suitIds.count(ta[TOON_TGT_COL]) != 0:
                    target = suitIds.index(ta[TOON_TGT_COL])
                else:
                    target = -1
                p = p + [index,
                 track,
                 ta[TOON_LVL_COL],
                 target]
                p = p + ta[4:]
            else:
                # No chosen attack for this toon: emit a default record.
                # NOTE(review): this passes the toon's list index to
                # getToonAttack rather than its doId — confirm intent.
                index = self.activeToons.index(t)
                attack = getToonAttack(index)
                p = p + attack
        for i in range(4 - len(self.activeToons)):
            p = p + getToonAttack(-1)
        for sa in self.suitAttacks:
            index = -1
            id = sa[SUIT_ID_COL]
            if id != -1:
                index = suitIds.index(id)
            if sa[SUIT_ATK_COL] == -1:
                targetIndex = -1
            else:
                targetIndex = sa[SUIT_TGT_COL]
                if targetIndex == -1:
                    self.notify.debug('suit attack: %d must be group' % sa[SUIT_ATK_COL])
                else:
                    toonId = self.activeToons[targetIndex]
            p = p + [index, sa[SUIT_ATK_COL], targetIndex]
            sa[SUIT_TAUNT_COL] = 0
            if sa[SUIT_ATK_COL] != -1:
                suit = self.findSuit(id)
                sa[SUIT_TAUNT_COL] = getAttackTauntIndexFromIndex(suit, sa[SUIT_ATK_COL])
            p = p + sa[3:]
        return p
    def d_setChosenToonAttacks(self):
        """Broadcast each active toon's currently chosen attack to clients."""
        self.notify.debug('network:setChosenToonAttacks()')
        self.sendUpdate('setChosenToonAttacks', self.getChosenToonAttacks())
def getChosenToonAttacks(self):
ids = []
tracks = []
levels = []
targets = []
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
else:
ta = getToonAttack(t)
ids.append(t)
tracks.append(ta[TOON_TRACK_COL])
levels.append(ta[TOON_LVL_COL])
targets.append(ta[TOON_TGT_COL])
return [ids,
tracks,
levels,
targets]
    def d_setBattleExperience(self):
        """Broadcast the end-of-battle experience payout to clients."""
        self.notify.debug('network:setBattleExperience()')
        self.sendUpdate('setBattleExperience', self.getBattleExperience())
def getBattleExperience(self):
returnValue = BattleExperienceAI.getBattleExperience(4, self.activeToons, self.toonExp, self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems, self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled, self.helpfulToons)
return returnValue
def getToonUberStatus(self):
fieldList = []
uberIndex = LAST_REGULAR_GAG_LEVEL + 1
for toon in self.activeToons:
toonList = []
for trackIndex in range(MAX_TRACK_INDEX):
toonList.append(toon.inventory.numItem(track, uberIndex))
fieldList.append(encodeUber(toonList))
return fieldList
def addSuit(self, suit):
self.notify.debug('addSuit(%d)' % suit.doId)
self.newSuits.append(suit)
self.suits.append(suit)
suit.battleTrap = NO_TRAP
self.numSuitsEver += 1
    def __joinSuit(self, suit):
        """Start *suit*'s join handshake: mark it joining and schedule a
        server-side timeout that forces it to pending if clients never ack."""
        self.joiningSuits.append(suit)
        # Clients get MAX_JOIN_T plus network slack to acknowledge the join.
        toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
        taskName = self.taskName('to-pending-av-%d' % suit.doId)
        self.__addJoinResponse(suit.doId, taskName)
        self.taskNames.append(taskName)
        taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(suit.doId, taskName))
    def __serverJoinDone(self, avId, taskName):
        """Timeout handler: clients never all acknowledged *avId*'s join, so
        force the avatar into the pending list anyway."""
        self.notify.debug('join for av: %d timed out on server' % avId)
        self.__removeTaskName(taskName)
        self.__makeAvPending(avId)
        return Task.done
    def __makeAvPending(self, avId):
        """Move avatar *avId* (toon or suit) from the joining list to the
        pending list, then rebroadcast membership and request an adjustment."""
        self.notify.debug('__makeAvPending(%d)' % avId)
        self.__removeJoinResponse(avId)
        self.__removeTaskName(self.taskName('to-pending-av-%d' % avId))
        if self.toons.count(avId) > 0:
            self.joiningToons.remove(avId)
            self.pendingToons.append(avId)
        else:
            suit = self.findSuit(avId)
            if suit != None:
                if not suit.isEmpty():
                    # Dump full membership state when a suit is promoted
                    # without having been recorded as joining.
                    if not self.joiningSuits.count(suit) == 1:
                        self.notify.warning('__makeAvPending(%d) in zone: %d' % (avId, self.zoneId))
                        self.notify.warning('toons: %s' % self.toons)
                        self.notify.warning('joining toons: %s' % self.joiningToons)
                        self.notify.warning('pending toons: %s' % self.pendingToons)
                        self.notify.warning('suits: %s' % self.suits)
                        self.notify.warning('joining suits: %s' % self.joiningSuits)
                        self.notify.warning('pending suits: %s' % self.pendingSuits)
                    self.joiningSuits.remove(suit)
                    self.pendingSuits.append(suit)
            else:
                self.notify.warning('makeAvPending() %d not in toons or suits' % avId)
                return
        self.d_setMembers()
        self.needAdjust = 1
        self.__requestAdjust()
def suitRequestJoin(self, suit):
self.notify.debug('suitRequestJoin(%d)' % suit.getDoId())
if self.suitCanJoin():
self.addSuit(suit)
self.__joinSuit(suit)
self.d_setMembers()
suit.prepareToJoinBattle()
return 1
else:
self.notify.warning('suitRequestJoin() - not joinable - joinable state: %s max suits: %d' % (self.joinableFsm.getCurrentState().getName(), self.maxSuits))
return 0
    def addToon(self, avId):
        """Register toon *avId* with the battle.

        Hooks avatar-exit events, initializes the toon's response and
        experience/merit/quest/item bookkeeping, and publishes the battle id
        on the toon.  Returns 1 on success, 0 if the toon is not in the
        repository.
        """
        self.notify.debug('addToon(%d)' % avId)
        toon = self.getToon(avId)
        if toon == None:
            return 0
        toon.stopToonUp()
        # Clean up if the avatar disconnects or reaches a safezone mid-battle.
        event = simbase.air.getAvatarExitEvent(avId)
        self.avatarExitEvents.append(event)
        self.accept(event, self.__handleUnexpectedExit, extraArgs=[avId])
        event = 'inSafezone-%s' % avId
        self.avatarExitEvents.append(event)
        self.accept(event, self.__handleSuddenExit, extraArgs=[avId, 0])
        self.newToons.append(avId)
        self.toons.append(avId)
        toon = simbase.air.doId2do.get(avId)
        if toon:
            if hasattr(self, 'doId'):
                toon.b_setBattleId(self.doId)
            else:
                toon.b_setBattleId(-1)
            messageToonAdded = 'Battle adding toon %s' % avId
            messenger.send(messageToonAdded, [avId])
        # A toon joining during the movie counts as already responded.
        if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie':
            self.responses[avId] = 1
        else:
            self.responses[avId] = 0
        self.adjustingResponses[avId] = 0
        # Snapshot per-toon progress so end-of-battle rewards are deltas.
        if avId not in self.toonExp:
            p = []
            for t in Tracks:
                p.append(toon.experience.getExp(t))
            self.toonExp[avId] = p
        if avId not in self.toonOrigMerits:
            self.toonOrigMerits[avId] = toon.cogMerits[:]
        if avId not in self.toonMerits:
            self.toonMerits[avId] = [0,
             0,
             0,
             0,
             0]
        if avId not in self.toonOrigQuests:
            flattenedQuests = []
            for quest in toon.quests:
                flattenedQuests.extend(quest)
            self.toonOrigQuests[avId] = flattenedQuests
        if avId not in self.toonItems:
            self.toonItems[avId] = ([], [])
        return 1
    def __joinToon(self, avId, pos):
        """Start *avId*'s join handshake and schedule the server-side join
        timeout.  NOTE(review): *pos* is accepted but unused here — confirm
        whether callers rely on it."""
        self.joiningToons.append(avId)
        toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
        taskName = self.taskName('to-pending-av-%d' % avId)
        self.__addJoinResponse(avId, taskName, toon=1)
        taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName))
        self.taskNames.append(taskName)
    def __updateEncounteredCogs(self):
        """Record, for each newly active toon/suit, which suits the active
        toons have now encountered (feeds the cog page).

        NOTE(review): when a suit lacks a `dna` attribute the method returns
        immediately, abandoning the rest of the update — kept as-is.
        """
        for toon in self.activeToons:
            if toon in self.newToons:
                for suit in self.activeSuits:
                    if hasattr(suit, 'dna'):
                        self.suitsEncountered.append({'type': suit.dna.name,
                         'activeToons': self.activeToons[:]})
                    else:
                        self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
                        return
                self.newToons.remove(toon)
        for suit in self.activeSuits:
            if suit in self.newSuits:
                if hasattr(suit, 'dna'):
                    self.suitsEncountered.append({'type': suit.dna.name,
                     'activeToons': self.activeToons[:]})
                else:
                    self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
                    return
                self.newSuits.remove(suit)
    def __makeToonRun(self, toonId, updateAttacks):
        """Move *toonId* from active to running and start its run timer;
        the timer fires __serverRunDone when the run completes."""
        self.activeToons.remove(toonId)
        self.toonGone = 1
        self.runningToons.append(toonId)
        taskName = self.taskName('running-toon-%d' % toonId)
        taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName))
        self.taskNames.append(taskName)
    def __serverRunDone(self, toonId, updateAttacks, taskName):
        """Run-timer callback: actually remove the toon, then either resume
        the world (battle empty) or rebroadcast and re-adjust."""
        self.notify.debug('run for toon: %d timed out on server' % toonId)
        self.__removeTaskName(taskName)
        self.__removeToon(toonId)
        self.d_setMembers()
        if len(self.toons) == 0:
            self.notify.debug('last toon is gone - battle is finished')
            self.b_setState('Resume')
        else:
            if updateAttacks == 1:
                self.d_setChosenToonAttacks()
            self.needAdjust = 1
            self.__requestAdjust()
        return Task.done
def __requestAdjust(self):
if not self.fsm:
return
cstate = self.fsm.getCurrentState().getName()
if cstate == 'WaitForInput' or cstate == 'WaitForJoin':
if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting':
if self.needAdjust == 1:
self.d_adjust()
self.adjustingSuits = []
for s in self.pendingSuits:
self.adjustingSuits.append(s)
self.adjustingToons = []
for t in self.pendingToons:
self.adjustingToons.append(t)
self.adjustFsm.request('Adjusting')
else:
self.notify.debug('requestAdjust() - dont need to')
else:
self.notify.debug('requestAdjust() - already adjusting')
else:
self.notify.debug('requestAdjust() - in state: %s' % cstate)
    def __handleUnexpectedExit(self, avId):
        """Avatar-exit event handler: log the disconnect and treat it as a
        sudden exit (non-user-aborted until the TODOs below are resolved)."""
        #TODO: fixme
        #disconnectCode = self.air.getAvatarDisconnectReason(avId)
        disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)"
        self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode))
        #userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow
        #TODO: fixme
        userAborted = False
        self.__handleSuddenExit(avId, userAborted)
    def __handleSuddenExit(self, avId, userAborted):
        """Remove a toon that left without the normal run flow; resume the
        world if the battle is now empty, otherwise re-adjust."""
        self.__removeToon(avId, userAborted=userAborted)
        # Mid-movie exits are remembered so the movie can skip the toon.
        if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
            self.exitedToons.append(avId)
        self.d_setMembers()
        if len(self.toons) == 0:
            self.notify.debug('last toon is gone - battle is finished')
            self.__removeAllTasks()
            self.timer.stop()
            self.adjustingTimer.stop()
            self.b_setState('Resume')
        else:
            self.needAdjust = 1
            self.__requestAdjust()
    def __removeSuit(self, suit):
        """Drop *suit* from all battle membership lists.

        NOTE(review): this is a byte-for-byte duplicate of an earlier
        __removeSuit definition in this class; this later definition is the
        one Python keeps.  Consider deleting one copy.
        """
        self.notify.debug('__removeSuit(%d)' % suit.doId)
        self.suits.remove(suit)
        self.activeSuits.remove(suit)
        if self.luredSuits.count(suit) == 1:
            self.luredSuits.remove(suit)
        self.suitGone = 1
        del suit.battleTrap
    def __removeToon(self, toonId, userAborted = 0):
        """Fully detach *toonId* from the battle: scrub every membership
        list, cancel its timers and responses, release its pet proxy and
        battle id, and settle its HP/inventory with the database.
        """
        self.notify.debug('__removeToon(%d)' % toonId)
        if self.toons.count(toonId) == 0:
            return
        self.battleCalc.toonLeftBattle(toonId)
        self.__removeToonTasks(toonId)
        self.toons.remove(toonId)
        if self.joiningToons.count(toonId) == 1:
            self.joiningToons.remove(toonId)
        if self.pendingToons.count(toonId) == 1:
            self.pendingToons.remove(toonId)
        if self.activeToons.count(toonId) == 1:
            # Keep suit-attack HP columns aligned with the shrunken
            # activeToons list.
            activeToonIdx = self.activeToons.index(toonId)
            self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx)
            for i in range(len(self.suitAttacks)):
                if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]):
                    del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx]
                else:
                    self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx))
            self.activeToons.remove(toonId)
        if self.runningToons.count(toonId) == 1:
            self.runningToons.remove(toonId)
        if self.adjustingToons.count(toonId) == 1:
            self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId)
            self.adjustingToons.remove(toonId)
        self.toonGone = 1
        if toonId in self.pets:
            self.pets[toonId].requestDelete()
            del self.pets[toonId]
        self.__removeResponse(toonId)
        self.__removeAdjustingResponse(toonId)
        self.__removeJoinResponses(toonId)
        event = simbase.air.getAvatarExitEvent(toonId)
        self.avatarExitEvents.remove(event)
        self.ignore(event)
        event = 'inSafezone-%s' % toonId
        self.avatarExitEvents.remove(event)
        self.ignore(event)
        toon = simbase.air.doId2do.get(toonId)
        if toon:
            toon.b_setBattleId(0)
            messageToonReleased = 'Battle releasing toon %s' % toon.doId
            messenger.send(messageToonReleased, [toon.doId])
        if not userAborted:
            # Normal exit: hand HP ownership back to the toon and sync state.
            toon = self.getToon(toonId)
            if toon != None:
                toon.hpOwnedByBattle = 0
                toon.d_setHp(toon.hp)
                toon.d_setInventory(toon.inventory.makeNetString())
                self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId())
        elif len(self.suits) > 0 and not self.streetBattle:
            # Abort penalty: write zeroed inventory and HP straight to the
            # database via a temporary toon object.
            self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId)
            toon = DistributedToonAI.DistributedToonAI(self.air)
            toon.doId = toonId
            empty = InventoryBase.InventoryBase(toon)
            toon.b_setInventory(empty.makeNetString())
            toon.b_setHp(0)
            db = DatabaseObject.DatabaseObject(self.air, toonId)
            db.storeObject(toon, ['setInventory', 'setHp'])
            self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId)
            toon.deleteDummy()
def getToon(self, toonId):
if toonId in self.air.doId2do:
return self.air.doId2do[toonId]
else:
self.notify.warning('getToon() - toon: %d not in repository!' % toonId)
return
    def toonRequestRun(self):
        """Distributed handler: the sender asks to run from the battle.

        Cancels any heals that targeted the runner (or group heals that
        would no longer have enough targets), then starts the run.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('ignoring response from toon: %d' % toonId)
            return
        self.notify.debug('toonRequestRun(%d)' % toonId)
        if not self.isRunable():
            self.notify.warning('toonRequestRun() - not runable')
            return
        updateAttacks = 0
        if self.activeToons.count(toonId) == 0:
            self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId)
            return
        for toon in self.activeToons:
            if toon in self.toonAttacks:
                ta = self.toonAttacks[toon]
                track = ta[TOON_TRACK_COL]
                level = ta[TOON_LVL_COL]
                # Reset heals aimed at the runner, and group heals that would
                # drop below two active toons.
                if ta[TOON_TGT_COL] == toonId or track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2:
                    healerId = ta[TOON_ID_COL]
                    self.notify.debug('resetting toon: %ds attack' % healerId)
                    self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK)
                    self.responses[healerId] = 0
                    updateAttacks = 1
        self.__makeToonRun(toonId, updateAttacks)
        self.d_setMembers()
        self.needAdjust = 1
        self.__requestAdjust()
    def toonRequestJoin(self, x, y, z):
        """Distributed handler: the sender asks to join at position (x, y, z)."""
        toonId = self.air.getAvatarIdFromSender()
        self.notify.debug('toonRequestJoin(%d)' % toonId)
        self.signupToon(toonId, x, y, z)
    def toonDied(self):
        """Distributed handler: the sender reports it went sad; zero its
        HP/inventory and remove it from the battle."""
        toonId = self.air.getAvatarIdFromSender()
        self.notify.debug('toonDied(%d)' % toonId)
        if toonId in self.toons:
            toon = self.getToon(toonId)
            if toon:
                toon.hp = -1
                toon.inventory.zeroInv(1)
                self.__handleSuddenExit(toonId, 0)
    def signupToon(self, toonId, x, y, z):
        """Admit *toonId* into the battle at (x, y, z), or deny the join if
        the battle is full/not joinable.  Duplicate signups are ignored."""
        if self.toons.count(toonId):
            return
        if self.toonCanJoin():
            if self.addToon(toonId):
                self.__joinToon(toonId, Point3(x, y, z))
                self.d_setMembers()
        else:
            self.notify.warning('toonRequestJoin() - not joinable')
            self.d_denyLocalToonJoin(toonId)
    def d_denyLocalToonJoin(self, toonId):
        """Tell just *toonId*'s client that its join request was refused."""
        self.notify.debug('network: denyLocalToonJoin(%d)' % toonId)
        self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', [])
def resetResponses(self):
self.responses = {}
for t in self.toons:
self.responses[t] = 0
self.ignoreResponses = 0
def allToonsResponded(self):
for t in self.toons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allPendingActiveToonsResponded(self):
for t in self.pendingToons + self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allActiveToonsResponded(self):
for t in self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
    def __removeResponse(self, toonId):
        """Forget *toonId*'s acknowledgement slot; if the remaining toons have
        now all responded, advance whatever phase was waiting on them."""
        del self.responses[toonId]
        if self.ignoreResponses == 0 and len(self.toons) > 0:
            currStateName = self.fsm.getCurrentState().getName()
            if currStateName == 'WaitForInput':
                if self.__allActiveToonsResponded():
                    self.notify.debug('removeResponse() - dont wait for movie')
                    self.__requestMovie()
            elif currStateName == 'PlayMovie':
                if self.__allPendingActiveToonsResponded():
                    self.notify.debug('removeResponse() - surprise movie done')
                    self.__movieDone()
            elif currStateName == 'Reward' or currStateName == 'BuildingReward':
                if self.__allActiveToonsResponded():
                    self.notify.debug('removeResponse() - surprise reward done')
                    self.handleRewardDone()
def __resetAdjustingResponses(self):
self.adjustingResponses = {}
for t in self.toons:
self.adjustingResponses[t] = 0
self.ignoreAdjustingResponses = 0
def __allAdjustingToonsResponded(self):
for t in self.toons:
if self.adjustingResponses[t] == 0:
return 0
self.ignoreAdjustingResponses = 1
return 1
def __removeAdjustingResponse(self, toonId):
if toonId in self.adjustingResponses:
del self.adjustingResponses[toonId]
if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0:
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def __addJoinResponse(self, avId, taskName, toon = 0):
if toon == 1:
for jr in self.joinResponses.values():
jr[avId] = 0
self.joinResponses[avId] = {}
for t in self.toons:
self.joinResponses[avId][t] = 0
self.joinResponses[avId]['taskName'] = taskName
    def __removeJoinResponses(self, avId):
        """Drop *avId* from the join-response machinery entirely: its own
        table and its acknowledgement slot in every other joiner's table.
        Joins that were only waiting on *avId* are then completed."""
        self.__removeJoinResponse(avId)
        removedOne = 0
        for j in self.joinResponses.values():
            if avId in j:
                del j[avId]
                removedOne = 1
        if removedOne == 1:
            for t in self.joiningToons:
                if self.__allToonsRespondedJoin(t):
                    self.__makeAvPending(t)
def __removeJoinResponse(self, avId):
if avId in self.joinResponses:
taskMgr.remove(self.joinResponses[avId]['taskName'])
del self.joinResponses[avId]
def __allToonsRespondedJoin(self, avId):
jr = self.joinResponses[avId]
for t in self.toons:
if jr[t] == 0:
return 0
return 1
def __cleanupJoinResponses(self):
for jr in self.joinResponses.values():
taskMgr.remove(jr['taskName'])
del jr
    def adjustDone(self):
        """Distributed handler: the sender finished playing the adjust
        animation; complete the adjust once every toon has reported in."""
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreAdjustingResponses == 1:
            self.notify.debug('adjustDone() - ignoring toon: %d' % toonId)
            return
        elif self.adjustFsm.getCurrentState().getName() != 'Adjusting':
            self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName())
            return
        elif self.toons.count(toonId) == 0:
            self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId)
            return
        self.adjustingResponses[toonId] += 1
        self.notify.debug('toon: %d done adjusting' % toonId)
        if self.__allAdjustingToonsResponded():
            self.__adjustDone()
    def timeout(self):
        """Distributed handler: the sender's input timer expired; record a
        default (no-attack) choice and possibly start the movie."""
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('timeout() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'WaitForInput':
            self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName())
            return
        elif self.toons.count(toonId) == 0:
            self.notify.warning('timeout() - toon: %d not in toon list' % toonId)
            return
        self.toonAttacks[toonId] = getToonAttack(toonId)
        self.d_setChosenToonAttacks()
        self.responses[toonId] += 1
        self.notify.debug('toon: %d timed out' % toonId)
        if self.__allActiveToonsResponded():
            self.__requestMovie(timeout=1)
    def movieDone(self):
        """Distributed handler: the sender finished watching the round movie.

        When everyone has finished the round advances; otherwise a short
        grace timer gives the stragglers a chance to catch up.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('movieDone() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'PlayMovie':
            self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName())
            return
        elif self.toons.count(toonId) == 0:
            self.notify.warning('movieDone() - toon: %d not in toon list' % toonId)
            return
        self.responses[toonId] += 1
        self.notify.debug('toon: %d done with movie' % toonId)
        if self.__allPendingActiveToonsResponded():
            self.__movieDone()
        else:
            self.timer.stop()
            self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone)
    def rewardDone(self):
        """Distributed handler: the sender finished the reward sequence.

        Valid only in one of the reward states; advances once every active
        toon has responded, otherwise grants a short grace timer.
        """
        toonId = self.air.getAvatarIdFromSender()
        stateName = self.fsm.getCurrentState().getName()
        if self.ignoreResponses == 1:
            self.notify.debug('rewardDone() - ignoring toon: %d' % toonId)
            return
        elif stateName not in ('Reward', 'BuildingReward', 'FactoryReward', 'MintReward', 'StageReward', 'CountryClubReward'):
            self.notify.warning('rewardDone() - in state %s' % stateName)
            return
        elif self.toons.count(toonId) == 0:
            self.notify.warning('rewardDone() - toon: %d not in toon list' % toonId)
            return
        self.responses[toonId] += 1
        self.notify.debug('toon: %d done with reward' % toonId)
        if self.__allActiveToonsResponded():
            self.handleRewardDone()
        else:
            self.timer.stop()
            self.timer.startCallback(TIMEOUT_PER_USER, self.serverRewardDone)
    def assignRewards(self):
        """Pay out experience/merits to the active toons, exactly once per
        battle (guarded by rewardHasPlayed)."""
        if self.rewardHasPlayed == 1:
            self.notify.debug('handleRewardDone() - reward has already played')
            return
        self.rewardHasPlayed = 1
        BattleExperienceAI.assignRewards(self.activeToons, self.battleCalc.toonSkillPtsGained, self.suitsKilled, self.getTaskZoneId(), self.helpfulToons)
    def joinDone(self, avId):
        """Distributed handler: the sender finished playing *avId*'s join
        animation; promote *avId* to pending once all toons have done so."""
        toonId = self.air.getAvatarIdFromSender()
        if self.toons.count(toonId) == 0:
            self.notify.warning('joinDone() - toon: %d not in toon list' % toonId)
            return
        if avId not in self.joinResponses:
            self.notify.debug('joinDone() - no entry for: %d - ignoring: %d' % (avId, toonId))
            return
        jr = self.joinResponses[avId]
        if toonId in jr:
            jr[toonId] += 1
        self.notify.debug('client with localToon: %d done joining av: %d' % (toonId, avId))
        if self.__allToonsRespondedJoin(avId):
            self.__makeAvPending(avId)
    def requestAttack(self, track, level, av):
        """Distributed handler: the sender chooses its attack for this round.

        *track* selects the gag track or a special action (SOS, NPCSOS,
        PETSOS, UN_ATTACK, PASS, FIRE); *level* is the gag level; *av* is
        the target (toon, suit, or helper id depending on track).  Once
        every active toon has a valid choice the movie is requested.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('requestAttack() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'WaitForInput':
            self.notify.warning('requestAttack() - in state: %s' % self.fsm.getCurrentState().getName())
            return
        elif self.activeToons.count(toonId) == 0:
            self.notify.warning('requestAttack() - toon: %d not in toon list' % toonId)
            return
        self.notify.debug('requestAttack(%d, %d, %d, %d)' % (toonId,
         track,
         level,
         av))
        toon = self.getToon(toonId)
        if toon == None:
            self.notify.warning('requestAttack() - no toon: %d' % toonId)
            return
        validResponse = 1
        if track == SOS:
            self.notify.debug('toon: %d calls for help' % toonId)
            self.air.writeServerEvent('friendSOS', toonId, '%s' % av)
            self.toonAttacks[toonId] = getToonAttack(toonId, track=SOS, target=av)
        elif track == NPCSOS:
            self.notify.debug('toon: %d calls for help' % toonId)
            self.air.writeServerEvent('NPCSOS', toonId, '%s' % av)
            toon = self.getToon(toonId)
            if toon == None:
                return
            if av in toon.NPCFriendsDict:
                # Only one toon may summon a given NPC per round; the loser
                # of the collision is converted to a PASS.
                npcCollision = 0
                if av in self.npcAttacks:
                    callingToon = self.npcAttacks[av]
                    if self.activeToons.count(callingToon) == 1:
                        self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
                        npcCollision = 1
                if npcCollision == 0:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=NPCSOS, level=5, target=av)
                    self.numNPCAttacks += 1
                    self.npcAttacks[av] = toonId
        elif track == PETSOS:
            self.notify.debug('toon: %d calls for pet: %d' % (toonId, av))
            self.air.writeServerEvent('PETSOS', toonId, '%s' % av)
            toon = self.getToon(toonId)
            if toon == None:
                return
            if not self.validate(toonId, level in toon.petTrickPhrases, 'requestAttack: invalid pet trickId: %s' % level):
                return
            self.toonAttacks[toonId] = getToonAttack(toonId, track=PETSOS, level=level, target=av)
        elif track == UN_ATTACK:
            # Toon retracted its choice: clear its response flag too.
            self.notify.debug('toon: %d changed its mind' % toonId)
            self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
            if toonId in self.responses:
                self.responses[toonId] = 0
            validResponse = 0
        elif track == PASS:
            self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
        elif track == FIRE:
            if simbase.air.doId2do[toonId].getPinkSlips() < self.getFireCount() + 1:
                #Not allowed to fire, force them to pass >:D
                self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
            else:
                #Allowed to fire
                self.setFireCount(self.fireCount + 1)
                self.toonAttacks[toonId] = getToonAttack(toonId, track=FIRE, target=av)
        else:
            # Regular gag: validate the track/level and inventory stock.
            if not self.validate(toonId, track >= 0 and track <= MAX_TRACK_INDEX, 'requestAttack: invalid track %s' % track):
                return
            if not self.validate(toonId, level >= 0 and level <= MAX_LEVEL_INDEX, 'requestAttack: invalid level %s' % level):
                return
            if toon.inventory.numItem(track, level) == 0:
                self.notify.warning('requestAttack() - toon has no item track: %d level: %d' % (track, level))
                self.toonAttacks[toonId] = getToonAttack(toonId)
                return
            if track == HEAL:
                # Heals aimed at a running toon (or group heals with too few
                # targets) are rejected back to the un-attack state.
                if self.runningToons.count(av) == 1 or attackAffectsGroup(track, level) and len(self.activeToons) < 2:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
                    validResponse = 0
                else:
                    self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
            else:
                self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
                if av == -1 and not attackAffectsGroup(track, level):
                    validResponse = 0
        self.d_setChosenToonAttacks()
        if validResponse == 1:
            self.responses[toonId] += 1
        self.notify.debug('toon: %d chose an attack' % toonId)
        if self.__allActiveToonsResponded():
            self.__requestMovie()
    def requestPetProxy(self, av):
        """Distributed handler: the sender asks for a battle proxy of its pet
        *av* so pet tricks can be used.

        Fetches the real pet object, clones its state onto a
        DistributedPetProxyAI, deletes the original, and generates the proxy
        once the original's exit event fires.
        """
        toonId = self.air.getAvatarIdFromSender()
        if self.ignoreResponses == 1:
            self.notify.debug('requestPetProxy() - ignoring toon: %d' % toonId)
            return
        elif self.fsm.getCurrentState().getName() != 'WaitForInput':
            self.notify.warning('requestPetProxy() - in state: %s' % self.fsm.getCurrentState().getName())
            return
        elif self.activeToons.count(toonId) == 0:
            self.notify.warning('requestPetProxy() - toon: %d not in toon list' % toonId)
            return
        self.notify.debug('requestPetProxy(%s, %s)' % (toonId, av))
        toon = self.getToon(toonId)
        if toon == None:
            self.notify.warning('requestPetProxy() - no toon: %d' % toonId)
            return
        petId = toon.getPetId()
        zoneId = self.zoneId
        # Only the toon's own pet may be proxied, and only once per battle.
        if petId == av:
            if not toonId in self.pets:
                def handleGetPetProxy(success, pet, petId = petId, zoneId = zoneId, toonId = toonId):
                    if success:
                        # Copy the full mood/appearance/trick state onto the proxy.
                        petProxy = DistributedPetProxyAI.DistributedPetProxyAI(self.air)
                        petProxy.setOwnerId(pet.getOwnerId())
                        petProxy.setPetName(pet.getPetName())
                        petProxy.setTraitSeed(pet.getTraitSeed())
                        petProxy.setSafeZone(pet.getSafeZone())
                        petProxy.setForgetfulness(pet.getForgetfulness())
                        petProxy.setBoredomThreshold(pet.getBoredomThreshold())
                        petProxy.setRestlessnessThreshold(pet.getRestlessnessThreshold())
                        petProxy.setPlayfulnessThreshold(pet.getPlayfulnessThreshold())
                        petProxy.setLonelinessThreshold(pet.getLonelinessThreshold())
                        petProxy.setSadnessThreshold(pet.getSadnessThreshold())
                        petProxy.setFatigueThreshold(pet.getFatigueThreshold())
                        petProxy.setHungerThreshold(pet.getHungerThreshold())
                        petProxy.setConfusionThreshold(pet.getConfusionThreshold())
                        petProxy.setExcitementThreshold(pet.getExcitementThreshold())
                        petProxy.setAngerThreshold(pet.getAngerThreshold())
                        petProxy.setSurpriseThreshold(pet.getSurpriseThreshold())
                        petProxy.setAffectionThreshold(pet.getAffectionThreshold())
                        petProxy.setHead(pet.getHead())
                        petProxy.setEars(pet.getEars())
                        petProxy.setNose(pet.getNose())
                        petProxy.setTail(pet.getTail())
                        petProxy.setBodyTexture(pet.getBodyTexture())
                        petProxy.setColor(pet.getColor())
                        petProxy.setColorScale(pet.getColorScale())
                        petProxy.setEyeColor(pet.getEyeColor())
                        petProxy.setGender(pet.getGender())
                        petProxy.setLastSeenTimestamp(pet.getLastSeenTimestamp())
                        petProxy.setBoredom(pet.getBoredom())
                        petProxy.setRestlessness(pet.getRestlessness())
                        petProxy.setPlayfulness(pet.getPlayfulness())
                        petProxy.setLoneliness(pet.getLoneliness())
                        petProxy.setSadness(pet.getSadness())
                        petProxy.setAffection(pet.getAffection())
                        petProxy.setHunger(pet.getHunger())
                        petProxy.setConfusion(pet.getConfusion())
                        petProxy.setExcitement(pet.getExcitement())
                        petProxy.setFatigue(pet.getFatigue())
                        petProxy.setAnger(pet.getAnger())
                        petProxy.setSurprise(pet.getSurprise())
                        petProxy.setTrickAptitudes(pet.getTrickAptitudes())
                        pet.requestDelete()
                        def deleted(task):
                            # Generate the proxy only after the real pet has
                            # fully left the repository (same doId is reused).
                            petProxy.doNotDeallocateChannel = True
                            petProxy.generateWithRequiredAndId(petId, self.air.districtId, self.zoneId)
                            petProxy.broadcastDominantMood()
                            self.pets[toonId] = petProxy
                            return task.done
                        self.acceptOnce(self.air.getAvatarExitEvent(petId),
                         lambda: taskMgr.doMethodLater(0,
                         deleted, self.uniqueName('petdel-%d' % petId)))
                    else:
                        self.notify.warning('error generating petProxy: %s' % petId)
                self.getPetProxyObject(petId, handleGetPetProxy)
def suitCanJoin(self):
return len(self.suits) < self.maxSuits and self.isJoinable()
def toonCanJoin(self):
return len(self.toons) < 4 and self.isJoinable()
    def __requestMovie(self, timeout = 0):
        """Transition toward movie generation, deferring while an adjust is
        in flight.  A short delay is inserted when several toons are active
        (unless we got here via a timeout)."""
        if self.adjustFsm.getCurrentState().getName() == 'Adjusting':
            self.notify.debug('__requestMovie() - in Adjusting')
            self.movieRequested = 1
        else:
            movieDelay = 0
            if len(self.activeToons) == 0:
                self.notify.warning('only pending toons left in battle %s, toons = %s' % (self.doId, self.toons))
            elif len(self.activeSuits) == 0:
                self.notify.warning('only pending suits left in battle %s, suits = %s' % (self.doId, self.suits))
            elif len(self.activeToons) > 1 and not timeout:
                movieDelay = 1
            self.fsm.request('MakeMovie')
            if movieDelay:
                taskMgr.doMethodLater(0.8, self.__makeMovie, self.uniqueName('make-movie'))
                self.taskNames.append(self.uniqueName('make-movie'))
            else:
                self.__makeMovie()
    def __makeMovie(self, task = None):
        """Run the battle calculator for this round and broadcast the movie.

        Guarded against running after a requested delete and against double
        generation.  Returns Task.done so it can serve as a task callback.
        """
        self.notify.debug('makeMovie()')
        if self._DOAI_requestedDelete:
            self.notify.warning('battle %s requested delete, then __makeMovie was called!' % self.doId)
            if hasattr(self, 'levelDoId'):
                self.notify.warning('battle %s in level %s' % (self.doId, self.levelDoId))
            return
        self.__removeTaskName(self.uniqueName('make-movie'))
        if self.movieHasBeenMade == 1:
            self.notify.debug('__makeMovie() - movie has already been made')
            return
        self.movieRequested = 0
        self.movieHasBeenMade = 1
        self.movieHasPlayed = 0
        self.rewardHasPlayed = 0
        # Normalize choices: PASS/UN_ATTACK become default records; any real
        # attack marks the toon as helpful for reward purposes.
        for t in self.activeToons:
            if t not in self.toonAttacks:
                self.toonAttacks[t] = getToonAttack(t)
            attack = self.toonAttacks[t]
            if attack[TOON_TRACK_COL] == PASS or attack[TOON_TRACK_COL] == UN_ATTACK:
                self.toonAttacks[t] = getToonAttack(t)
            if self.toonAttacks[t][TOON_TRACK_COL] != NO_ATTACK:
                self.addHelpfulToon(t)
        self.battleCalc.calculateRound()
        for t in self.activeToons:
            self.sendEarnedExperience(t)
            toon = self.getToon(t)
            if toon != None:
                # The battle owns HP while the movie plays.
                toon.hpOwnedByBattle = 1
                if toon.immortalMode:
                    toon.toonUp(toon.maxHp)
        self.d_setMovie()
        self.b_setState('PlayMovie')
        return Task.done
def sendEarnedExperience(self, toonId):
toon = self.getToon(toonId)
if toon != None:
expList = self.battleCalc.toonSkillPtsGained.get(toonId, None)
if expList == None:
toon.d_setEarnedExperience([])
else:
roundList = []
for exp in expList:
roundList.append(int(exp + 0.5))
toon.d_setEarnedExperience(roundList)
def enterOff(self):
return
def exitOff(self):
return
def enterFaceOff(self):
return
def exitFaceOff(self):
return
    def enterWaitForJoin(self):
        """'WaitForJoin' entry: move straight to input when a suit is already
        active; otherwise allow runs and wait for arrivals."""
        self.notify.debug('enterWaitForJoin()')
        if len(self.activeSuits) > 0:
            self.b_setState('WaitForInput')
        else:
            self.notify.debug('enterWaitForJoin() - no active suits')
            self.runableFsm.request('Runable')
            self.resetResponses()
            self.__requestAdjust()
def exitWaitForJoin(self):
pass
def enterWaitForInput(self):
self.notify.debug('enterWaitForInput()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Runable')
self.resetResponses()
self.__requestAdjust()
if not self.tutorialFlag:
self.timer.startCallback(SERVER_INPUT_TIMEOUT, self.__serverTimedOut)
self.npcAttacks = {}
for toonId in self.toons:
if bboard.get('autoRestock-%s' % toonId, False):
toon = self.air.doId2do.get(toonId)
if toon is not None:
toon.doRestock(0)
def exitWaitForInput(self):
self.npcAttacks = {}
self.timer.stop()
def __serverTimedOut(self):
self.notify.debug('wait for input timed out on server')
self.ignoreResponses = 1
self.__requestMovie(timeout=1)
def enterMakeMovie(self):
self.notify.debug('enterMakeMovie()')
self.runableFsm.request('Unrunable')
self.resetResponses()
def exitMakeMovie(self):
pass
def enterPlayMovie(self):
self.notify.debug('enterPlayMovie()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Unrunable')
self.resetResponses()
movieTime = TOON_ATTACK_TIME * (len(self.activeToons) + self.numNPCAttacks) + SUIT_ATTACK_TIME * len(self.activeSuits) + SERVER_BUFFER_TIME
self.numNPCAttacks = 0
self.notify.debug('estimated upper bound of movie time: %f' % movieTime)
self.timer.startCallback(movieTime, self.__serverMovieDone)
def __serverMovieDone(self):
self.notify.debug('movie timed out on server')
self.ignoreResponses = 1
self.__movieDone()
def serverRewardDone(self):
self.notify.debug('reward timed out on server')
self.ignoreResponses = 1
self.handleRewardDone()
def handleRewardDone(self):
self.b_setState('Resume')
def exitPlayMovie(self):
self.timer.stop()
def __movieDone(self):
self.notify.debug('__movieDone() - movie is finished')
if self.movieHasPlayed == 1:
self.notify.debug('__movieDone() - movie had already finished')
return
self.movieHasBeenMade = 0
self.movieHasPlayed = 1
self.ignoreResponses = 1
needUpdate = 0
toonHpDict = {}
for toon in self.activeToons:
toonHpDict[toon] = [0, 0, 0]
actualToon = self.getToon(toon)
self.notify.debug('BEFORE ROUND: toon: %d hp: %d' % (toon, actualToon.hp))
deadSuits = []
trapDict = {}
suitsLuredOntoTraps = []
npcTrapAttacks = []
for activeToon in self.activeToons + self.exitedToons:
if activeToon in self.toonAttacks:
attack = self.toonAttacks[activeToon]
track = attack[TOON_TRACK_COL]
npc_level = None
if track == NPCSOS:
track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
if track == None:
track = NPCSOS
elif track == TRAP:
npcTrapAttacks.append(attack)
toon = self.getToon(attack[TOON_ID_COL])
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
continue
if track != NO_ATTACK:
toonId = attack[TOON_ID_COL]
level = attack[TOON_LVL_COL]
if npc_level != None:
level = npc_level
if attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(toonId)
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
elif track == PETSOS:
pass
elif track == FIRE:
pass
elif track != SOS:
toon = self.getToon(toonId)
if toon != None:
check = toon.inventory.useItem(track, level)
if check == -1:
self.air.writeServerEvent('suspicious', toonId, 'Toon generating movie for non-existant gag track %s level %s' % (track, level))
self.notify.warning('generating movie for non-existant gag track %s level %s! avId: %s' % (track, level, toonId))
toon.d_setInventory(toon.inventory.makeNetString())
hps = attack[TOON_HP_COL]
if track == SOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == NPCSOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == PETSOS:
self.notify.debug('toon: %d called for pet' % toonId)
for i in range(len(self.activeToons)):
toon = self.getToon(self.activeToons[i])
if toon != None:
if i < len(hps):
hp = hps[i]
if hp > 0:
toonHpDict[toon.doId][0] += hp
self.notify.debug('pet heal: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
elif track == NPC_RESTOCK_GAGS:
for at in self.activeToons:
toon = self.getToon(at)
if toon != None:
toon.inventory.NPCMaxOutInv(npc_level)
toon.d_setInventory(toon.inventory.makeNetString())
elif track == HEAL:
if levelAffectsGroup(HEAL, level):
for i in range(len(self.activeToons)):
at = self.activeToons[i]
if at != toonId or attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(at)
if toon != None:
if i < len(hps):
hp = hps[i]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
self.notify.debug('HEAL: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
targetId = attack[TOON_TGT_COL]
toon = self.getToon(targetId)
if toon != None and targetId in self.activeToons:
targetIndex = self.activeToons.index(targetId)
if targetIndex < len(hps):
hp = hps[targetIndex]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (targetIndex, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
elif attackAffectsGroup(track, level, attack[TOON_TRACK_COL]):
for suit in self.activeSuits:
targetIndex = self.activeSuits.index(suit)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if hp > 0 and track == LURE:
if suit.battleTrap == UBER_GAG_LEVEL_INDEX:
pass
suit.battleTrap = NO_TRAP
needUpdate = 1
if suit.doId in trapDict:
del trapDict[suit.doId]
if suitsLuredOntoTraps.count(suit) == 0:
suitsLuredOntoTraps.append(suit)
if track == TRAP:
targetId = suit.doId
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
needUpdate = 1
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(suit) == 0:
deadSuits.append(suit)
else:
targetId = attack[TOON_TGT_COL]
target = self.findSuit(targetId)
if target != None:
targetIndex = self.activeSuits.index(target)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if track == TRAP:
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
if hp > 0 and track == LURE:
oldBattleTrap = target.battleTrap
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
pass
target.battleTrap = NO_TRAP
needUpdate = 1
if target.doId in trapDict:
del trapDict[target.doId]
if suitsLuredOntoTraps.count(target) == 0:
suitsLuredOntoTraps.append(target)
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
for otherSuit in self.activeSuits:
if not otherSuit == target:
otherSuit.battleTrap = NO_TRAP
if otherSuit.doId in trapDict:
del trapDict[otherSuit.doId]
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(target) == 0:
deadSuits.append(target)
self.exitedToons = []
for suitKey in trapDict.keys():
attackList = trapDict[suitKey]
attack = attackList[0]
target = self.findSuit(attack[TOON_TGT_COL])
if attack[TOON_LVL_COL] == UBER_GAG_LEVEL_INDEX:
targetId = suitKey
target = self.findSuit(targetId)
if len(attackList) == 1:
if suitsLuredOntoTraps.count(target) == 0:
self.notify.debug('movieDone() - trap set')
target.battleTrap = attack[TOON_LVL_COL]
needUpdate = 1
else:
target.battleTrap = NO_TRAP
else:
self.notify.debug('movieDone() - traps collided')
if target != None:
target.battleTrap = NO_TRAP
if self.battleCalc.trainTrapTriggered:
self.notify.debug('Train trap triggered, clearing all traps')
for otherSuit in self.activeSuits:
self.notify.debug('suit =%d, oldBattleTrap=%d' % (otherSuit.doId, otherSuit.battleTrap))
otherSuit.battleTrap = NO_TRAP
currLuredSuits = self.battleCalc.getLuredSuits()
if len(self.luredSuits) == len(currLuredSuits):
for suit in self.luredSuits:
if currLuredSuits.count(suit.doId) == 0:
needUpdate = 1
break
else:
needUpdate = 1
self.luredSuits = []
for i in currLuredSuits:
suit = self.air.doId2do[i]
self.luredSuits.append(suit)
self.notify.debug('movieDone() - suit: %d is lured' % i)
for attack in npcTrapAttacks:
track, level, hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
for suit in self.activeSuits:
if self.luredSuits.count(suit) == 0 and suit.battleTrap == NO_TRAP:
suit.battleTrap = level
needUpdate = 1
for suit in deadSuits:
self.notify.debug('removing dead suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.debug('whoops, suit %d is deleted.' % suit.doId)
else:
self.notify.debug('suit had revives? %d' % suit.getMaxSkeleRevives())
encounter = {'type': suit.dna.name,
'level': suit.getActualLevel(),
'track': suit.dna.dept,
'isSkelecog': suit.getSkelecog(),
'isForeman': suit.isForeman(),
'isVP': 0,
'isCFO': 0,
'isSupervisor': suit.isSupervisor(),
'isVirtual': suit.isVirtual(),
'hasRevives': suit.getMaxSkeleRevives(),
'activeToons': self.activeToons[:]}
self.suitsKilled.append(encounter)
self.suitsKilledThisBattle.append(encounter)
self.air.suitInvasionManager.handleSuitDefeated()
self.__removeSuit(suit)
needUpdate = 1
suit.resume()
lastActiveSuitDied = 0
if len(self.activeSuits) == 0 and len(self.pendingSuits) == 0:
lastActiveSuitDied = 1
for i in range(4):
attack = self.suitAttacks[i][SUIT_ATK_COL]
if attack != NO_ATTACK:
suitId = self.suitAttacks[i][SUIT_ID_COL]
suit = self.findSuit(suitId)
if suit == None:
self.notify.warning('movieDone() - suit: %d is gone!' % suitId)
continue
if not (hasattr(suit, 'dna') and suit.dna):
toonId = self.air.getAvatarIdFromSender()
self.notify.warning('_movieDone avoiding crash, sender=%s but suit has no dna' % toonId)
self.air.writeServerEvent('suspicious', toonId, '_movieDone avoiding crash, suit has no dna')
continue
adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack)
hps = self.suitAttacks[i][SUIT_HP_COL]
if adict['group'] == ATK_TGT_GROUP:
for activeToon in self.activeToons:
toon = self.getToon(activeToon)
if toon != None:
targetIndex = self.activeToons.index(activeToon)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % activeToon)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (activeToon, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
elif adict['group'] == ATK_TGT_SINGLE:
targetIndex = self.suitAttacks[i][SUIT_TGT_COL]
if targetIndex >= len(self.activeToons):
self.notify.warning('movieDone() - toon: %d gone!' % targetIndex)
break
toonId = self.activeToons[targetIndex]
toon = self.getToon(toonId)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % toonId)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (toonId, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
deadToons = []
for activeToon in self.activeToons:
hp = toonHpDict[activeToon]
toon = self.getToon(activeToon)
if toon != None:
self.notify.debug('AFTER ROUND: currtoonHP: %d toonMAX: %d hheal: %d damage: %d' % (toon.hp,
toon.maxHp,
hp[0],
hp[1]))
toon.hpOwnedByBattle = 0
hpDelta = hp[0] - hp[1]
if hpDelta >= 0:
toon.toonUp(hpDelta, quietly=1)
else:
toon.takeDamage(-hpDelta, quietly=1)
if toon.hp <= 0:
self.notify.debug('movieDone() - toon: %d was killed' % activeToon)
toon.inventory.zeroInv(1)
deadToons.append(activeToon)
self.notify.debug('AFTER ROUND: toon: %d setHp: %d' % (toon.doId, toon.hp))
if toon.unlimitedGags:
toon.doRestock(noUber=0, noPaid=0)
for deadToon in deadToons:
self.__removeToon(deadToon)
needUpdate = 1
self.clearAttacks()
self.d_setMovie()
self.d_setChosenToonAttacks()
self.localMovieDone(needUpdate, deadToons, deadSuits, lastActiveSuitDied)
def enterResume(self):
for suit in self.suits:
self.notify.info('battle done, resuming suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.info('whoops, suit %d is deleted.' % suit.doId)
else:
suit.resume()
self.suits = []
self.joiningSuits = []
self.pendingSuits = []
self.adjustingSuits = []
self.activeSuits = []
self.luredSuits = []
for toonId in self.toons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
for exitEvent in self.avatarExitEvents:
self.ignore(exitEvent)
eventMsg = {}
for encounter in self.suitsKilledThisBattle:
cog = encounter['type']
level = encounter['level']
msgName = '%s%s' % (cog, level)
if encounter['isSkelecog']:
msgName += '+'
if msgName in eventMsg:
eventMsg[msgName] += 1
else:
eventMsg[msgName] = 1
msgText = ''
for msgName, count in eventMsg.items():
if msgText != '':
msgText += ','
msgText += '%s%s' % (count, msgName)
self.air.writeServerEvent('battleCogsDefeated', self.doId, '%s|%s' % (msgText, self.getTaskZoneId()))
def exitResume(self):
pass
def isJoinable(self):
return self.joinableFsm.getCurrentState().getName() == 'Joinable'
def enterJoinable(self):
self.notify.debug('enterJoinable()')
def exitJoinable(self):
pass
def enterUnjoinable(self):
self.notify.debug('enterUnjoinable()')
def exitUnjoinable(self):
pass
def isRunable(self):
return self.runableFsm.getCurrentState().getName() == 'Runable'
def enterRunable(self):
self.notify.debug('enterRunable()')
def exitRunable(self):
pass
def enterUnrunable(self):
self.notify.debug('enterUnrunable()')
def exitUnrunable(self):
pass
def __estimateAdjustTime(self):
self.needAdjust = 0
adjustTime = 0
if len(self.pendingSuits) > 0 or self.suitGone == 1:
self.suitGone = 0
pos0 = self.suitPendingPoints[0][0]
pos1 = self.suitPoints[0][0][0]
adjustTime = self.calcSuitMoveTime(pos0, pos1)
if len(self.pendingToons) > 0 or self.toonGone == 1:
self.toonGone = 0
if adjustTime == 0:
pos0 = self.toonPendingPoints[0][0]
pos1 = self.toonPoints[0][0][0]
adjustTime = self.calcToonMoveTime(pos0, pos1)
return adjustTime
def enterAdjusting(self):
self.notify.debug('enterAdjusting()')
self.timer.stop()
self.__resetAdjustingResponses()
self.adjustingTimer.startCallback(self.__estimateAdjustTime() + SERVER_BUFFER_TIME, self.__serverAdjustingDone)
def __serverAdjustingDone(self):
if self.needAdjust == 1:
self.adjustFsm.request('NotAdjusting')
self.__requestAdjust()
else:
self.notify.debug('adjusting timed out on the server')
self.ignoreAdjustingResponses = 1
self.__adjustDone()
def exitAdjusting(self):
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
self.timer.restart()
elif currStateName == 'WaitForJoin':
self.b_setState('WaitForInput')
self.adjustingTimer.stop()
def __addTrainTrapForNewSuits(self):
hasTrainTrap = False
trapInfo = None
for otherSuit in self.activeSuits:
if otherSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
hasTrainTrap = True
if hasTrainTrap:
for curSuit in self.activeSuits:
if not curSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
oldBattleTrap = curSuit.battleTrap
curSuit.battleTrap = UBER_GAG_LEVEL_INDEX
self.battleCalc.addTrainTrapForJoiningSuit(curSuit.doId)
self.notify.debug('setting traintrack trap for joining suit %d oldTrap=%s' % (curSuit.doId, oldBattleTrap))
def __adjustDone(self):
for s in self.adjustingSuits:
self.pendingSuits.remove(s)
self.activeSuits.append(s)
self.adjustingSuits = []
for toon in self.adjustingToons:
if self.pendingToons.count(toon) == 1:
self.pendingToons.remove(toon)
else:
self.notify.warning('adjustDone() - toon: %d not pending!' % toon.doId)
if self.activeToons.count(toon) == 0:
self.activeToons.append(toon)
self.ignoreResponses = 0
self.sendEarnedExperience(toon)
else:
self.notify.warning('adjustDone() - toon: %d already active!' % toon.doId)
self.adjustingToons = []
self.__addTrainTrapForNewSuits()
self.d_setMembers()
self.adjustFsm.request('NotAdjusting')
if self.needAdjust == 1:
self.notify.debug('__adjustDone() - need to adjust again')
self.__requestAdjust()
def enterNotAdjusting(self):
self.notify.debug('enterNotAdjusting()')
if self.movieRequested == 1:
if len(self.activeToons) > 0 and self.__allActiveToonsResponded():
self.__requestMovie()
def exitNotAdjusting(self):
pass
def getPetProxyObject(self, petId, callback):
doneEvent = 'generate-%d' % petId
def handlePetProxyRead(pet):
callback(1, pet)
self.air.sendActivate(petId, self.air.districtId, 0)
self.acceptOnce(doneEvent, handlePetProxyRead)
def _getNextSerialNum(self):
num = self.serialNum
self.serialNum += 1
return num
def setFireCount(self, amount):
self.fireCount = amount
def getFireCount(self):
return self.fireCount
@magicWord(category=CATEGORY_PROGRAMMER)
def skipMovie():
invoker = spellbook.getInvoker()
battleId = invoker.getBattleId()
if not battleId:
return 'You are not currently in a battle!'
battle = simbase.air.doId2do.get(battleId)
battle._DistributedBattleBaseAI__movieDone()
return 'Battle movie skipped.'
| [((947, 1017), 'direct.directnotify.DirectNotifyGlobal.directNotify.newCategory', 'DirectNotifyGlobal.directNotify.newCategory', (['"""DistributedBattleBaseAI"""'], {}), "('DistributedBattleBaseAI')\n", (990, 1017), False, 'from direct.directnotify import DirectNotifyGlobal\n'), ((1167, 1226), 'direct.distributed.DistributedObjectAI.DistributedObjectAI.__init__', 'DistributedObjectAI.DistributedObjectAI.__init__', (['self', 'air'], {}), '(self, air)\n', (1215, 1226), False, 'from direct.distributed import DistributedObjectAI\n'), ((5287, 5346), 'direct.distributed.DistributedObjectAI.DistributedObjectAI.requestDelete', 'DistributedObjectAI.DistributedObjectAI.requestDelete', (['self'], {}), '(self)\n', (5340, 5346), False, 'from direct.distributed import DistributedObjectAI\n'), ((6012, 6064), 'direct.distributed.DistributedObjectAI.DistributedObjectAI.delete', 'DistributedObjectAI.DistributedObjectAI.delete', (['self'], {}), '(self)\n', (6058, 6064), False, 'from direct.distributed import DistributedObjectAI\n'), ((15093, 15342), 'toontown.battle.BattleExperienceAI.getBattleExperience', 'BattleExperienceAI.getBattleExperience', (['(4)', 'self.activeToons', 'self.toonExp', 'self.battleCalc.toonSkillPtsGained', 'self.toonOrigQuests', 'self.toonItems', 'self.toonOrigMerits', 'self.toonMerits', 'self.toonParts', 'self.suitsKilled', 'self.helpfulToons'], {}), '(4, self.activeToons, self.toonExp,\n self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems,\n self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled,\n self.helpfulToons)\n', (15131, 15342), False, 'from toontown.battle import BattleExperienceAI\n'), ((66394, 66443), 'toontown.toon.NPCToons.getNPCTrackLevelHp', 'NPCToons.getNPCTrackLevelHp', (['attack[TOON_TGT_COL]'], {}), '(attack[TOON_TGT_COL])\n', (66421, 66443), False, 'from toontown.toon import NPCToons\n'), ((3199, 3290), 'direct.fsm.State.State', 'State.State', (['"""FaceOff"""', 'self.enterFaceOff', 'self.exitFaceOff', 
"['WaitForInput', 'Resume']"], {}), "('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput',\n 'Resume'])\n", (3210, 3290), False, 'from direct.fsm import State\n'), ((3297, 3401), 'direct.fsm.State.State', 'State.State', (['"""WaitForJoin"""', 'self.enterWaitForJoin', 'self.exitWaitForJoin', "['WaitForInput', 'Resume']"], {}), "('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, [\n 'WaitForInput', 'Resume'])\n", (3308, 3401), False, 'from direct.fsm import State\n'), ((3407, 3510), 'direct.fsm.State.State', 'State.State', (['"""WaitForInput"""', 'self.enterWaitForInput', 'self.exitWaitForInput', "['MakeMovie', 'Resume']"], {}), "('WaitForInput', self.enterWaitForInput, self.exitWaitForInput,\n ['MakeMovie', 'Resume'])\n", (3418, 3510), False, 'from direct.fsm import State\n'), ((3517, 3612), 'direct.fsm.State.State', 'State.State', (['"""MakeMovie"""', 'self.enterMakeMovie', 'self.exitMakeMovie', "['PlayMovie', 'Resume']"], {}), "('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, [\n 'PlayMovie', 'Resume'])\n", (3528, 3612), False, 'from direct.fsm import State\n'), ((3618, 3725), 'direct.fsm.State.State', 'State.State', (['"""PlayMovie"""', 'self.enterPlayMovie', 'self.exitPlayMovie', "['WaitForJoin', 'Reward', 'Resume']"], {}), "('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, [\n 'WaitForJoin', 'Reward', 'Resume'])\n", (3629, 3725), False, 'from direct.fsm import State\n'), ((3731, 3799), 'direct.fsm.State.State', 'State.State', (['"""Reward"""', 'self.enterReward', 'self.exitReward', "['Resume']"], {}), "('Reward', self.enterReward, self.exitReward, ['Resume'])\n", (3742, 3799), False, 'from direct.fsm import State\n'), ((3810, 3870), 'direct.fsm.State.State', 'State.State', (['"""Resume"""', 'self.enterResume', 'self.exitResume', '[]'], {}), "('Resume', self.enterResume, self.exitResume, [])\n", (3821, 3870), False, 'from direct.fsm import State\n'), ((3881, 3956), 'direct.fsm.State.State', 'State.State', (['"""Off"""', 
'self.enterOff', 'self.exitOff', "['FaceOff', 'WaitForJoin']"], {}), "('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])\n", (3892, 3956), False, 'from direct.fsm import State\n'), ((4035, 4113), 'direct.fsm.State.State', 'State.State', (['"""Joinable"""', 'self.enterJoinable', 'self.exitJoinable', "['Unjoinable']"], {}), "('Joinable', self.enterJoinable, self.exitJoinable, ['Unjoinable'])\n", (4046, 4113), False, 'from direct.fsm import State\n'), ((4115, 4202), 'direct.fsm.State.State', 'State.State', (['"""Unjoinable"""', 'self.enterUnjoinable', 'self.exitUnjoinable', "['Joinable']"], {}), "('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, [\n 'Joinable'])\n", (4126, 4202), False, 'from direct.fsm import State\n'), ((4333, 4407), 'direct.fsm.State.State', 'State.State', (['"""Runable"""', 'self.enterRunable', 'self.exitRunable', "['Unrunable']"], {}), "('Runable', self.enterRunable, self.exitRunable, ['Unrunable'])\n", (4344, 4407), False, 'from direct.fsm import State\n'), ((4409, 4487), 'direct.fsm.State.State', 'State.State', (['"""Unrunable"""', 'self.enterUnrunable', 'self.exitUnrunable', "['Runable']"], {}), "('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])\n", (4420, 4487), False, 'from direct.fsm import State\n'), ((4618, 4719), 'direct.fsm.State.State', 'State.State', (['"""Adjusting"""', 'self.enterAdjusting', 'self.exitAdjusting', "['NotAdjusting', 'Adjusting']"], {}), "('Adjusting', self.enterAdjusting, self.exitAdjusting, [\n 'NotAdjusting', 'Adjusting'])\n", (4629, 4719), False, 'from direct.fsm import State\n'), ((4716, 4809), 'direct.fsm.State.State', 'State.State', (['"""NotAdjusting"""', 'self.enterNotAdjusting', 'self.exitNotAdjusting', "['Adjusting']"], {}), "('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting,\n ['Adjusting'])\n", (4727, 4809), False, 'from direct.fsm import State\n'), ((27757, 27802), 'toontown.toon.DistributedToonAI.DistributedToonAI', 
'DistributedToonAI.DistributedToonAI', (['self.air'], {}), '(self.air)\n', (27792, 27802), False, 'from toontown.toon import DistributedToonAI\n'), ((27854, 27887), 'toontown.toon.InventoryBase.InventoryBase', 'InventoryBase.InventoryBase', (['toon'], {}), '(toon)\n', (27881, 27887), False, 'from toontown.toon import InventoryBase\n'), ((27988, 28035), 'toontown.ai.DatabaseObject.DatabaseObject', 'DatabaseObject.DatabaseObject', (['self.air', 'toonId'], {}), '(self.air, toonId)\n', (28017, 28035), False, 'from toontown.ai import DatabaseObject\n'), ((55141, 55190), 'toontown.toon.NPCToons.getNPCTrackLevelHp', 'NPCToons.getNPCTrackLevelHp', (['attack[TOON_TGT_COL]'], {}), '(attack[TOON_TGT_COL])\n', (55168, 55190), False, 'from toontown.toon import NPCToons\n'), ((44749, 44802), 'toontown.pets.DistributedPetProxyAI.DistributedPetProxyAI', 'DistributedPetProxyAI.DistributedPetProxyAI', (['self.air'], {}), '(self.air)\n', (44792, 44802), False, 'from toontown.pets import DistributedPetProxyAI\n'), ((12410, 12434), 'random.randint', 'random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (12424, 12434), False, 'import random\n')] |
HsunGong/Augmented-Advertisement | tracking_test.py | ae9d0f5796c13e837a1a547d888647aeb61f0b04 | # Copyright (c) Group Three-Forest SJTU. All Rights Reserved.
from tracking.tracking import *
# a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]])
a = tracking_video_rectangle_tovideo("video/","1.mp4", "1.png", [[273,352],[266,616],[412,620],[416,369]], result = 'result__.avi', method_num = 5, edge = 4, middle_halt = 250)
| [] |
katetolstaya/gym-flock | gym_flock/envs/old/flocking_position.py | 3236d1dafcb1b9be0cf78b471672e8becb2d37af | import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.fig = None
self.line1 = None
self.filter_len = int(config['filter_length'])
self.nx_system = 4
self.n_nodes = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max # 0.5 * self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
self.pooling = []
if config.getboolean('sum_pooling'):
self.pooling.append(np.nansum)
if config.getboolean('min_pooling'):
self.pooling.append(np.nanmin)
if config.getboolean('max_pooling'):
self.pooling.append(np.nanmax)
self.n_pools = len(self.pooling)
# number of features and outputs
self.n_features = int(config['N_features'])
self.nx = int(self.n_features / self.n_pools / self.filter_len)
self.nu = int(config['N_outputs']) # outputs
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x = np.zeros((self.n_nodes, self.nx_system))
self.u = np.zeros((self.n_nodes, self.nu))
self.mean_vel = np.zeros((self.n_nodes, self.nu))
# TODO
self.max_accel = 40
self.max_z = 200
# self.b = np.ones((self.n_nodes,1))
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 )
# self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(
# self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32)
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 )
self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32)
self.seed()
def render(self, mode='human'):
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
x = self.x
x_ = np.zeros((self.n_nodes, self.nx_system))
#u = np.vstack((np.zeros((self.n_leaders, 2)), u))
# x position
x_[:, 0] = x[:, 0] + x[:, 2] * self.dt
# y position
x_[:, 1] = x[:, 1] + x[:, 3] * self.dt
# x velocity
x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# y velocity
x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# TODO - check the 0.1
self.x = x_
self.x_agg = self.aggregate(self.x, self.x_agg)
self.u = u
return self._get_obs(), -self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
return np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001
#return np.sum(np.square(self.x[:,2:4] - self.mean_vel))
def _get_obs(self):
reshaped = self.x_agg.reshape((self.n_nodes, self.n_features))
clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z)
return clipped #[self.n_leaders:, :]
def reset(self):
x = np.zeros((self.n_nodes, self.nx_system))
degree = 0
min_dist = 0
while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25:
# randomly initialize the state of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1]
# compute distances between agents
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
# no self loops
a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes)
# compute minimum distance between agents and degree of network
min_dist = np.min(np.min(a_net))
a_net = a_net < self.comm_radius
degree = np.min(np.sum(a_net.astype(int), axis=1))
self.mean_vel = np.mean(x[:,2:4],axis=0)
self.x = x
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x_agg = self.aggregate(self.x, self.x_agg)
return self._get_obs()
# def render(self, mode='human'):
# pass
def close(self):
pass
def aggregate(self, xt, x_agg):
"""
Perform aggegration operation for all possible pooling operations using helper functions get_pool and get_comms
Args:
x_agg (): Last time step's aggregated info
xt (): Current state of all agents
Returns:
Aggregated state values
"""
x_features = self.get_x_features(xt)
a_net = self.get_connectivity(xt)
for k in range(0, self.n_pools):
comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net)
x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k])
return x_agg
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current states of all agents
Returns: adjacency matrix of network
"""
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
a_net = (a_net < self.comm_radius).astype(float)
np.fill_diagonal(a_net, 0)
return a_net
def get_x_features(self, xt): # TODO
"""
Compute the non-linear features necessary for implementing Turner 2003
Args:
xt (): current state of all agents
Returns: matrix of features for each agent
"""
diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye(
self.n_nodes)
return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2),
diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2)))
def get_features(self, agg):
"""
Matrix of
Args:
agg (): the aggregated matrix from the last time step
Returns: matrix of aggregated features from all nodes at current time
"""
return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # TODO check indexing
def get_comms(self, mat, a_net):
"""
Enforces that agents who are not connected in the network cannot observe each others' states
Args:
mat (): matrix of state information for the whole graph
a_net (): adjacency matrix for flock network (weighted networks unsupported for now)
Returns:
mat (): sparse matrix with NaN values where agents can't communicate
"""
a_net[a_net == 0] = np.nan
return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1)
def get_pool(self, mat, func):
"""
Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who
can't communicate must already be enforced.
Args:
mat (): matrix of state information
func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs.
Returns:
information pooled from neighbors for each agent
"""
return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1
def controller(self):
"""
The controller for flocking from Turner 2003.
Args:
x (): the current state
Returns: the optimal action
"""
x = self.x
s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye(
self.n_nodes)
p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2)))
p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2))
return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1)))
def potential_grad(self, pos_diff, r2):
"""
Computes the gradient of the potential function for flocking proposed in Turner 2003.
Args:
pos_diff (): difference in a component of position among all agents
r2 (): distance squared between agents
Returns: corresponding component of the gradient of the potential
"""
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
grad[r2 > self.comm_radius] = 0
return grad
| [((488, 515), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (513, 515), False, 'import configparser\n'), ((1655, 1720), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx * self.filter_len, self.n_pools)'], {}), '((self.n_nodes, self.nx * self.filter_len, self.n_pools))\n', (1663, 1720), True, 'import numpy as np\n'), ((1738, 1778), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (1746, 1778), True, 'import numpy as np\n'), ((1796, 1829), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nu)'], {}), '((self.n_nodes, self.nu))\n', (1804, 1829), True, 'import numpy as np\n'), ((1854, 1887), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nu)'], {}), '((self.n_nodes, self.nu))\n', (1862, 1887), True, 'import numpy as np\n'), ((2336, 2423), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-self.max_accel)', 'high': 'self.max_accel', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-self.max_accel, high=self.max_accel, shape=(2,), dtype=np.\n float32)\n', (2346, 2423), False, 'from gym import spaces, error, utils\n'), ((2454, 2546), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-self.max_z)', 'high': 'self.max_z', 'shape': '(self.n_features,)', 'dtype': 'np.float32'}), '(low=-self.max_z, high=self.max_z, shape=(self.n_features,),\n dtype=np.float32)\n', (2464, 2546), False, 'from gym import spaces, error, utils\n'), ((3434, 3457), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3451, 3457), False, 'from gym.utils import seeding\n'), ((3536, 3576), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (3544, 3576), True, 'import numpy as np\n'), ((4556, 4610), 'numpy.clip', 'np.clip', (['reshaped'], {'a_min': '(-self.max_z)', 'a_max': 'self.max_z'}), '(reshaped, a_min=-self.max_z, a_max=self.max_z)\n', (4563, 4610), True, 'import numpy as np\n'), ((4690, 4730), 'numpy.zeros', 'np.zeros', 
(['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (4698, 4730), True, 'import numpy as np\n'), ((6098, 6163), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx * self.filter_len, self.n_pools)'], {}), '((self.n_nodes, self.nx * self.filter_len, self.n_pools))\n', (6106, 6163), True, 'import numpy as np\n'), ((7520, 7546), 'numpy.fill_diagonal', 'np.fill_diagonal', (['a_net', '(0)'], {}), '(a_net, 0)\n', (7536, 7546), True, 'import numpy as np\n'), ((427, 449), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (439, 449), False, 'from os import path\n'), ((2644, 2653), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2651, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2684), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2682, 2684), True, 'import matplotlib.pyplot as plt\n'), ((2885, 2930), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (2893, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2988), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (2951, 2988), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3010), 'matplotlib.pyplot.gca', 'gca', ([], {}), '()\n', (3008, 3010), False, 'from matplotlib.pyplot import gca\n'), ((3127, 3154), 'matplotlib.pyplot.title', 'plt.title', (['"""GNN Controller"""'], {}), "('GNN Controller')\n", (3136, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3850, 3900), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.std_dev', '(self.n_nodes,)'], {}), '(0, self.std_dev, (self.n_nodes,))\n', (3866, 3900), True, 'import numpy as np\n'), ((3977, 4027), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.std_dev', '(self.n_nodes,)'], {}), '(0, self.std_dev, (self.n_nodes,))\n', (3993, 4027), True, 'import numpy as np\n'), ((4306, 4336), 'numpy.var', 
'np.var', (['self.x[:, 2:4]'], {'axis': '(0)'}), '(self.x[:, 2:4], axis=0)\n', (4312, 4336), True, 'import numpy as np\n'), ((5188, 5252), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_bias)', 'high': 'self.v_bias', 'size': '(2,)'}), '(low=-self.v_bias, high=self.v_bias, size=(2,))\n', (5205, 5252), True, 'import numpy as np\n'), ((6032, 6058), 'numpy.mean', 'np.mean', (['x[:, 2:4]'], {'axis': '(0)'}), '(x[:, 2:4], axis=0)\n', (6039, 6058), True, 'import numpy as np\n'), ((8038, 8058), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (8044, 8058), True, 'import numpy as np\n'), ((10212, 10232), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (10218, 10232), True, 'import numpy as np\n'), ((4948, 5002), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.r_max'], {'size': '(self.n_nodes,)'}), '(0, self.r_max, size=(self.n_nodes,))\n', (4965, 5002), True, 'import numpy as np\n'), ((5032, 5077), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {'size': '(self.n_nodes,)'}), '(0, 2, size=(self.n_nodes,))\n', (5049, 5077), True, 'import numpy as np\n'), ((5109, 5122), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5115, 5122), True, 'import numpy as np\n'), ((5154, 5167), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5160, 5167), True, 'import numpy as np\n'), ((5275, 5348), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_nodes,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_nodes,))\n', (5292, 5348), True, 'import numpy as np\n'), ((5381, 5454), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_nodes,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_nodes,))\n', (5398, 5454), True, 'import numpy as np\n'), ((5880, 5893), 'numpy.min', 'np.min', (['a_net'], {}), '(a_net)\n', (5886, 5893), True, 'import numpy as np\n'), ((7950, 7995), 
'numpy.multiply', 'np.multiply', (['diff[:, :, (0)]', 'diff[:, :, (0)]'], {}), '(diff[:, :, (0)], diff[:, :, (0)])\n', (7961, 7995), True, 'import numpy as np\n'), ((7994, 8039), 'numpy.multiply', 'np.multiply', (['diff[:, :, (1)]', 'diff[:, :, (1)]'], {}), '(diff[:, :, (1)], diff[:, :, (1)])\n', (8005, 8039), True, 'import numpy as np\n'), ((8160, 8190), 'numpy.divide', 'np.divide', (['diff[:, :, (0)]', 'r2'], {}), '(diff[:, :, (0)], r2)\n', (8169, 8190), True, 'import numpy as np\n'), ((8278, 8308), 'numpy.divide', 'np.divide', (['diff[:, :, (1)]', 'r2'], {}), '(diff[:, :, (1)], r2)\n', (8287, 8308), True, 'import numpy as np\n'), ((10116, 10165), 'numpy.multiply', 'np.multiply', (['s_diff[:, :, (0)]', 's_diff[:, :, (0)]'], {}), '(s_diff[:, :, (0)], s_diff[:, :, (0)])\n', (10127, 10165), True, 'import numpy as np\n'), ((10164, 10213), 'numpy.multiply', 'np.multiply', (['s_diff[:, :, (1)]', 's_diff[:, :, (1)]'], {}), '(s_diff[:, :, (1)], s_diff[:, :, (1)])\n', (10175, 10213), True, 'import numpy as np\n'), ((10378, 10398), 'numpy.nansum', 'np.nansum', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (10387, 10398), True, 'import numpy as np\n'), ((11017, 11040), 'numpy.divide', 'np.divide', (['pos_diff', 'r2'], {}), '(pos_diff, r2)\n', (11026, 11040), True, 'import numpy as np\n'), ((5752, 5772), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (5758, 5772), True, 'import numpy as np\n'), ((8138, 8157), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (8149, 8157), True, 'import numpy as np\n'), ((8256, 8275), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (8267, 8275), True, 'import numpy as np\n'), ((10990, 11009), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (11001, 11009), True, 'import numpy as np\n')] |
codingwangfeng/GoodGoodName | conf/constants.py | 02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb | # -*-coding:utf-8-*-
# from functools import reduce
from functools import reduce
SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16, 18, 21, 23, 24, 25, 31, 32, 33, 35, 37, 39, 41, 45, 47, 48, 52, 57, 61,
63,
65, 67, 68, 81] # 吉祥运暗示数(代表健全,幸福,名誉等)
SANCAI_xiaoji = [6, 17, 26, 27, 29, 30, 38, 49, 51, 55, 58, 71, 73, 75] # 次吉祥运暗示数(代表多少有些障碍,但能获得吉运)
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22, 28, 34, 36, 40, 42, 43, 44, 46, 50, 53, 54, 56, 59, 60, 62, 64, 66, 69,
70,
72, 74, 76, 77, 78, 79, 80] # 凶数运暗示数(代表逆境,沉浮,薄弱,病难,困难,多灾等)
SANCAI_wise = [3, 13, 16, 21, 23, 29, 31, 37, 39, 41, 45, 47] # 首领运暗示数(智慧 )仁勇全备,立上位,能领导众人)
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52] # 财富运暗示数(多钱财,富贵,白手可获巨财)
SANCAI_artist = [13, 14, 18, 26, 29, 33, 35, 38, 48] # 艺能运暗示数(富有艺术天才,对审美,艺术,演艺,体育有通达之能)
SANCAI_goodwife = [5, 6, 11, 13, 15, 16, 24, 32, 35] # 女德运暗示数(具有妇德,品性温良,助夫爱子)
SANCAI_death = [21, 23, 26, 28, 29, 33, 39] # 女性孤寡运暗示数(难觅夫君,家庭不和,夫妻两虎相斗,离婚,严重者夫妻一方早亡)
SANCAI_alone = [4, 10, 12, 14, 22, 28, 34] # 孤独运暗示数(妻凌夫或夫克妻)
SANCAI_merry = [5, 6, 15, 16, 32, 39, 41] # 双妻运暗示数
SANCAI_stubbon = [7, 17, 18, 25, 27, 28, 37, 47] # 刚情运暗示数(性刚固执,意气用事)
SANCAI_gentle = [5, 6, 11, 15, 16, 24, 31, 32, 35] # 温和运暗示数(性情平和,能得上下信望)
# 可以自己配置觉得好的数字
# 参考好的搭配
refer_good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 自己设定的好的搭配
good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 参考坏的搭配
refer_bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone, SANCAI_stubbon]
# 自己设定的坏的搭配
bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone]
good_num_set = set(reduce((lambda x, y: x + y), good_num_list, []))
bad_num_set = set(reduce((lambda x, y: x + y), bad_num_list, []))
print('五格好分值:', good_num_set)
print('五格差分值:', bad_num_set)
# 筛选出有好没坏的三才五格
best_num_set = [x for x in good_num_set if x not in bad_num_set]
print('想要的三才五格数字:', best_num_set)
RESULT_UNKNOWN = '结果未知'
| [((1814, 1859), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'good_num_list', '[]'], {}), '(lambda x, y: x + y, good_num_list, [])\n', (1820, 1859), False, 'from functools import reduce\n'), ((1881, 1925), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'bad_num_list', '[]'], {}), '(lambda x, y: x + y, bad_num_list, [])\n', (1887, 1925), False, 'from functools import reduce\n')] |
kevinmuturi5/farm-Management-system | main/migrations/0006_labourer_allproj.py | 61929d7998d92d56daac67c2f8ace3cc76b6ee8b | # Generated by Django 3.1.2 on 2020-10-18 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20201018_1902'),
]
operations = [
migrations.AddField(
model_name='labourer',
name='allproj',
field=models.ManyToManyField(blank=True, to='main.Listing'),
),
]
| [((334, 387), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""main.Listing"""'}), "(blank=True, to='main.Listing')\n", (356, 387), False, 'from django.db import migrations, models\n')] |
beachwood23/taurus | bzt/modules/blazemeter/blazemeter_reporter.py | 698ac747bae5d4940a879a8526add67c11ef42da | """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import sys
import time
import traceback
import zipfile
from collections import defaultdict, OrderedDict
from io import BytesIO
from urllib.error import HTTPError
import requests
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
from bzt.bza import User, Session, Test
from bzt.engine import Reporter, Singletone
from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time
from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider, ConsolidatingAggregator
from bzt.modules.monitoring import Monitoring, MonitoringListener
from bzt.modules.blazemeter.project_finder import ProjectFinder
from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
"""
Reporter class
:type _test: bzt.bza.Test
:type _master: bzt.bza.Master
:type _session: bzt.bza.Session
"""
def __init__(self):
super(BlazeMeterUploader, self).__init__()
self.browser_open = 'start'
self.kpi_buffer = []
self.send_interval = 30
self._last_status_check = time.time()
self.send_data = True
self.upload_artifacts = True
self.send_monitoring = True
self.monitoring_buffer = None
self.public_report = False
self.last_dispatch = 0
self.results_url = None
self._user = User()
self._test = None
self._master = None
self._session = None
self.first_ts = sys.maxsize
self.last_ts = 0
self.report_name = None
self._dpoint_serializer = DatapointSerializer(self)
def prepare(self):
"""
Read options for uploading, check that they're sane
"""
super(BlazeMeterUploader, self).prepare()
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
self.browser_open = self.settings.get("browser-open", self.browser_open)
self.public_report = self.settings.get("public-report", self.public_report)
self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
token = self.settings.get("token", "")
if not token:
self.log.warning("No BlazeMeter API key provided, will upload anonymously")
self._user.token = token
# usual fields
self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
self._user.address = self.settings.get("address", self._user.address).rstrip("/")
self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
if isinstance(self._user.http_session, requests.Session):
self.log.debug("Installing http client")
self._user.http_session = self.engine.get_http_client()
self._user.http_request = self._user.http_session.request
# direct data feeding case
sess_id = self.parameters.get("session-id")
if sess_id:
self._session = Session(self._user, {'id': sess_id})
self._session['userId'] = self.parameters.get("user-id", None)
self._session['testId'] = self.parameters.get("test-id", None)
self._test = Test(self._user, {'id': self._session['testId']})
exc = TaurusConfigError("Need signature for session")
self._session.data_signature = self.parameters.get("signature", exc)
self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
self.send_data = self.parameters.get("send-data", self.send_data)
else:
try:
self._user.ping() # to check connectivity and auth
except HTTPError:
self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
raise
if token:
wsp = self._user.accounts().workspaces()
if not wsp:
raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
self._test = finder.resolve_external_test()
else:
self._test = Test(self._user, {'id': None})
self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
if self.report_name == 'ask' and sys.stdin.isatty():
self.report_name = input("Please enter report-name: ")
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
for service in self.engine.services:
if isinstance(service, Monitoring):
service.add_listener(self)
def startup(self):
"""
Initiate online test
"""
super(BlazeMeterUploader, self).startup()
self._user.log = self.log.getChild(self.__class__.__name__)
if not self._session:
url = self._start_online()
self.log.info("Started data feeding: %s", url)
if self.browser_open in ('start', 'both'):
open_browser(url)
if self._user.token and self.public_report:
report_link = self._master.make_report_public()
self.log.info("Public report link: %s", report_link)
def _start_online(self):
"""
Start online test
"""
self.log.info("Initiating data feeding...")
if self._test['id']:
self._session, self._master = self._test.start_external()
else:
self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
self._test['id'] = self._session['testId']
if self._test.token:
self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']
if self.report_name:
self._session.set({"name": str(self.report_name)})
return self.results_url
def __get_jtls_and_more(self):
"""
Compress all files in artifacts dir to single zipfile
:rtype: (io.BytesIO,dict)
"""
mfile = BytesIO()
listing = {}
logs = set()
for handler in self.engine.log.parent.handlers:
if isinstance(handler, logging.FileHandler):
logs.add(handler.baseFilename)
max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB
with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
for root, _, files in os.walk(self.engine.artifacts_dir):
for filename in files:
full_path = os.path.join(root, filename)
if full_path in logs:
logs.remove(full_path)
fsize = os.path.getsize(full_path)
if fsize <= max_file_size:
zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
listing[full_path] = fsize
else:
msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
self.log.warning(msg, filename, max_file_size)
for filename in logs: # upload logs unconditionally
zfh.write(filename, os.path.basename(filename))
listing[filename] = os.path.getsize(filename)
return mfile, listing
def __upload_artifacts(self):
"""
If token provided, upload artifacts folder contents and bzt.log
"""
if not self._session.token:
return
worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
if worker_index:
suffix = '-%s' % worker_index
else:
suffix = ''
artifacts_zip = "artifacts%s.zip" % suffix
mfile, zip_listing = self.__get_jtls_and_more()
self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
self._session.upload_file(artifacts_zip, mfile.getvalue())
self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))
handlers = self.engine.log.parent.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler):
fname = handler.baseFilename
self.log.info("Uploading %s", fname)
fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
modified_name = fhead + suffix + ftail
with open(fname, 'rb') as _file:
self._session.upload_file(modified_name, _file.read())
_file.seek(-4096, 2)
tail = _file.read()
tail = tail[tail.index(b("\n")) + 1:]
self._session.upload_file(modified_name + ".tail.bz", tail)
def post_process(self):
"""
Upload results if possible
"""
if not self._session:
self.log.debug("No feeding session obtained, nothing to finalize")
return
self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
try:
self.log.info("Sending remaining KPI data to server...")
if self.send_data:
self.__send_data(self.kpi_buffer, False, True)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
finally:
self._postproc_phase2()
if self.results_url:
if self.browser_open in ('end', 'both'):
open_browser(self.results_url)
self.log.info("Online report link: %s", self.results_url)
def _postproc_phase2(self):
try:
if self.upload_artifacts:
self.__upload_artifacts()
except (IOError, TaurusNetworkError):
self.log.warning("Failed artifact upload: %s", traceback.format_exc())
finally:
self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
self.log.debug("Set last check time to: %s", self._last_status_check)
tries = self.send_interval # NOTE: you dirty one...
while not self._last_status_check and tries > 0:
self.log.info("Waiting for ping...")
time.sleep(self.send_interval)
tries -= 1
self._postproc_phase3()
def _postproc_phase3(self):
try:
if self.send_data:
self.end_online()
if self._user.token and self.engine.stopping_reason:
exc_class = self.engine.stopping_reason.__class__.__name__
note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
self.append_note_to_session(note)
if self._master:
self.append_note_to_master(note)
except KeyboardInterrupt:
raise
except BaseException as exc:
self.log.debug("Failed to finish online: %s", traceback.format_exc())
self.log.warning("Failed to finish online: %s", exc)
def end_online(self):
"""
Finish online test
"""
if not self._session:
self.log.debug("Feeding not started, so not stopping")
else:
self.log.info("Ending data feeding...")
if self._user.token:
self._session.stop()
else:
self._session.stop_anonymous()
def append_note_to_session(self, note):
self._session.fetch()
if 'note' in self._session:
note = self._session['note'] + '\n' + note
note = note.strip()
if note:
self._session.set({'note': note[:NOTE_SIZE_LIMIT]})
def append_note_to_master(self, note):
self._master.fetch()
if 'note' in self._master:
note = self._master['note'] + '\n' + note
note = note.strip()
if note:
self._master.set({'note': note[:NOTE_SIZE_LIMIT]})
def check(self):
"""
Send data if any in buffer
"""
self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
if self.last_dispatch < (time.time() - self.send_interval):
self.last_dispatch = time.time()
if self.send_data and len(self.kpi_buffer):
self.__send_data(self.kpi_buffer)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
return super(BlazeMeterUploader, self).check()
def __send_data(self, data, do_check=True, is_final=False):
"""
:type data: list[bzt.modules.aggregator.DataPoint]
"""
if not self._session:
return
self.engine.aggregator.converter(data)
serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
self._session.send_kpi_data(serialized, do_check)
def aggregated_second(self, data):
"""
Send online data
:param data: DataPoint
"""
if self.send_data:
self.kpi_buffer.append(data)
def monitoring_data(self, data):
if self.send_monitoring:
self.monitoring_buffer.record_data(data)
def __send_monitoring(self):
engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
if not engine_id:
engine_id = "0"
data = self.monitoring_buffer.get_monitoring_json(self._session)
self._session.send_monitoring_data(engine_id, data)
def __format_listing(self, zip_listing):
lines = []
for fname in sorted(zip_listing.keys()):
bytestr = humanize_bytes(zip_listing[fname])
if fname.startswith(self.engine.artifacts_dir):
fname = fname[len(self.engine.artifacts_dir) + 1:]
lines.append(bytestr + " " + fname)
return "\n".join(lines)
class MonitoringBuffer(object):
def __init__(self, size_limit, parent_log):
self.size_limit = size_limit
self.data = defaultdict(OrderedDict)
self.log = parent_log.getChild(self.__class__.__name__)
# data :: dict(datasource -> dict(interval -> datapoint))
# datapoint :: dict(metric -> value)
def record_data(self, data):
for monitoring_item in data:
item = copy.deepcopy(monitoring_item)
source = item.pop('source')
timestamp = int(item['ts'])
item['interval'] = 1
buff = self.data[source]
if timestamp in buff:
buff[timestamp].update(item)
else:
buff[timestamp] = item
sources = list(self.data)
for source in sources:
if len(self.data[source]) > self.size_limit:
self._downsample(self.data[source])
self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))
def _downsample(self, buff):
size = 1
while len(buff) > self.size_limit:
self._merge_small_intervals(buff, size)
size += 1
def _merge_small_intervals(self, buff, size):
timestamps = list(buff)
merged_already = set()
for left, right in zip(timestamps, timestamps[1:]):
if left in merged_already:
continue
if buff[left]['interval'] <= size:
self._merge_datapoints(buff[left], buff[right])
buff.pop(right)
merged_already.add(left)
merged_already.add(right)
@staticmethod
def _merge_datapoints(left, right):
sum_size = float(left['interval'] + right['interval'])
for metric in set(right):
if metric in ('ts', 'interval'):
continue
if metric in left:
left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size
else:
left[metric] = right[metric]
left['interval'] = sum_size
def get_monitoring_json(self, session):
"""
:type session: Session
"""
results = {}
hosts = []
kpis = {}
for source, buff in iteritems(self.data):
for timestamp, item in iteritems(buff):
if source == 'local':
source = platform.node()
if source not in results:
results[source] = {
"name": source,
"intervals": OrderedDict()
}
if source not in hosts:
hosts.append(source)
src = results[source]
tstmp = timestamp * 1000
tstmp_key = '%d' % tstmp
if tstmp_key not in src['intervals']:
src['intervals'][tstmp_key] = {
"start": tstmp,
"duration": item['interval'] * 1000,
"indicators": {}
}
for field, value in iteritems(item):
if field.lower().startswith('conn-all'):
field = 'Connections'
elif field.lower().startswith('cpu'):
field = 'CPU'
elif field.lower().startswith('mem'):
field = 'Memory'
value *= 100
elif field == 'bytes-recv' or field.lower().startswith('net'):
field = 'Network I/O'
elif field == 'engine-loop':
field = 'Busy Taurus'
else:
continue # maybe one day BZA will accept all other metrics...
if field not in kpis:
kpis[field] = field
src['intervals'][tstmp_key]['indicators'][field] = {
"value": value,
"name": field,
"std": 0,
"mean": 0,
"sum": 0,
"min": 0,
"max": 0,
"sumOfSquares": 0,
"n": 1
}
kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}
return {
"reportInfo": {
"sessionId": session['id'],
"timestamp": time.time(),
"userId": session['userId'],
"testId": session['testId'],
"type": "MONITOR",
"testName": ""
},
"kpis": kpis,
"hosts": hosts,
"results": results
}
class DatapointSerializer(object):
def __init__(self, owner):
"""
:type owner: BlazeMeterUploader
"""
super(DatapointSerializer, self).__init__()
self.owner = owner
self.multi = 1000 # miltiplier factor for reporting
def get_kpi_body(self, data_buffer, is_final):
# - reporting format:
# {labels: <data>, # see below
# sourceID: <id of BlazeMeterClient object>,
# [is_final: True]} # for last report
#
# - elements of 'data' are described in __get_label()
#
# - elements of 'intervals' are described in __get_interval()
# every interval contains info about response codes that were received on it.
report_items = BetterDict()
if data_buffer:
self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])
# following data is received in the cumulative way
for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
report_item = self.__get_label(label, kpi_set)
self.__add_errors(report_item, kpi_set) # 'Errors' tab
report_items[label] = report_item
# fill 'Timeline Report' tab with intervals data
# intervals are received in the additive way
if report_items:
for dpoint in data_buffer:
time_stamp = dpoint[DataPoint.TIMESTAMP]
for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
exc = TaurusInternalException('Cumulative KPISet is non-consistent')
report_item = report_items.get(label, exc)
report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))
report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list
data = {"labels": report_items, "sourceID": id(self.owner)}
if is_final:
data['final'] = True
return to_json(data)
@staticmethod
def __add_errors(report_item, kpi_set):
errors = kpi_set[KPISet.ERRORS]
for error in errors:
if error["type"] == KPISet.ERRTYPE_ERROR:
report_item['errors'].append({
'm': error['msg'],
"rc": error['rc'],
"count": error['cnt'],
})
elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE:
report_item['failedEmbeddedResources'].append({
"count": error['cnt'],
"rm": error['msg'],
"rc": error['rc'],
"url": list(error['urls'])[0] if error['urls'] else None,
})
else:
report_item['assertions'].append({
'failureMessage': error['msg'],
'name': error['tag'] if error['tag'] else 'All Assertions',
'failures': error['cnt']
})
def __get_label(self, name, cumul):
return {
"n": cumul[KPISet.SAMPLE_COUNT], # total count of samples
"name": name if name else 'ALL', # label
"interval": 1, # not used
"intervals": [], # list of intervals, fill later
"samplesNotCounted": 0, # not used
"assertionsNotCounted": 0, # not used
"failedEmbeddedResources": [], # not used
"failedEmbeddedResourcesSpilloverCount": 0, # not used
"otherErrorsCount": 0, # not used
"errors": [], # list of errors, fill later
"assertions": [], # list of assertions, fill later
"percentileHistogram": [], # not used
"percentileHistogramLatency": [], # not used
"percentileHistogramBytes": [], # not used
"empty": False, # not used
"summary": self.__get_summary(cumul) # summary info
}
def __get_summary(self, cumul):
return {
"first": self.owner.first_ts,
"last": self.owner.last_ts,
"duration": self.owner.last_ts - self.owner.first_ts,
"failed": cumul[KPISet.FAILURES],
"hits": cumul[KPISet.SAMPLE_COUNT],
"avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
"min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
"max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
"std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
"tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
"tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
"tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,
"latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
"latencyMax": 0,
"latencyMin": 0,
"latencySTD": 0,
"bytes": cumul[KPISet.BYTE_COUNT],
"bytesMax": 0,
"bytesMin": 0,
"bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(cumul[KPISet.SAMPLE_COUNT])),
"bytesSTD": 0,
"otherErrorsSpillcount": 0,
}
def __get_interval(self, item, time_stamp):
# rc_list - list of info about response codes:
# {'n': <number of code encounters>,
# 'f': <number of failed request (e.q. important for assertions)>
# 'rc': <string value of response code>}
rc_list = []
for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
rc_list.append({"n": cnt, 'f': fails, "rc": r_code})
return {
"ec": item[KPISet.FAILURES],
"ts": time_stamp,
"na": item[KPISet.CONCURRENCY],
"n": item[KPISet.SAMPLE_COUNT],
"failed": item[KPISet.FAILURES],
"rc": rc_list,
"t": {
"min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
"max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
KPISet.PERCENTILES] else 0,
"sum": self.multi * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": self.multi * item[KPISet.STDEV_RESP_TIME],
"avg": self.multi * item[KPISet.AVG_RESP_TIME]
},
"lt": {
"min": 0,
"max": 0,
"sum": self.multi * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": self.multi * item[KPISet.AVG_LATENCY]
},
"by": {
"min": 0,
"max": 0,
"sum": item[KPISet.BYTE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT])
},
}
| [((1859, 1870), 'time.time', 'time.time', ([], {}), '()\n', (1868, 1870), False, 'import time\n'), ((2131, 2137), 'bzt.bza.User', 'User', ([], {}), '()\n', (2135, 2137), False, 'from bzt.bza import User, Session, Test\n'), ((7511, 7520), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7518, 7520), False, 'from io import BytesIO\n'), ((15648, 15672), 'collections.defaultdict', 'defaultdict', (['OrderedDict'], {}), '(OrderedDict)\n', (15659, 15672), False, 'from collections import defaultdict, OrderedDict\n'), ((17811, 17831), 'bzt.utils.iteritems', 'iteritems', (['self.data'], {}), '(self.data)\n', (17820, 17831), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((21150, 21162), 'bzt.utils.BetterDict', 'BetterDict', ([], {}), '()\n', (21160, 21162), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((22546, 22559), 'bzt.utils.to_json', 'to_json', (['data'], {}), '(data)\n', (22553, 22559), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((26278, 26312), 'bzt.utils.iteritems', 'iteritems', (['item[KPISet.RESP_CODES]'], {}), '(item[KPISet.RESP_CODES])\n', (26287, 26312), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((4268, 4304), 'bzt.bza.Session', 'Session', (['self._user', "{'id': sess_id}"], {}), "(self._user, {'id': sess_id})\n", (4275, 4304), False, 'from bzt.bza import User, Session, Test\n'), ((4480, 4529), 'bzt.bza.Test', 'Test', (['self._user', "{'id': self._session['testId']}"], {}), "(self._user, {'id': self._session['testId']})\n", (4484, 4529), False, 'from bzt.bza import User, Session, Test\n'), ((4548, 4595), 'bzt.TaurusConfigError', 'TaurusConfigError', (['"""Need signature for session"""'], {}), "('Need signature for session')\n", (4565, 4595), False, 'from bzt import 
TaurusInternalException, TaurusConfigError, TaurusNetworkError\n'), ((5730, 5748), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (5746, 5748), False, 'import sys\n'), ((7836, 7923), 'zipfile.ZipFile', 'zipfile.ZipFile', (['mfile'], {'mode': '"""w"""', 'compression': 'zipfile.ZIP_DEFLATED', 'allowZip64': '(True)'}), "(mfile, mode='w', compression=zipfile.ZIP_DEFLATED,\n allowZip64=True)\n", (7851, 7923), False, 'import zipfile\n'), ((7962, 7996), 'os.walk', 'os.walk', (['self.engine.artifacts_dir'], {}), '(self.engine.artifacts_dir)\n', (7969, 7996), False, 'import os\n'), ((13821, 13832), 'time.time', 'time.time', ([], {}), '()\n', (13830, 13832), False, 'import time\n'), ((15267, 15301), 'bzt.utils.humanize_bytes', 'humanize_bytes', (['zip_listing[fname]'], {}), '(zip_listing[fname])\n', (15281, 15301), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((15938, 15968), 'copy.deepcopy', 'copy.deepcopy', (['monitoring_item'], {}), '(monitoring_item)\n', (15951, 15968), False, 'import copy\n'), ((17868, 17883), 'bzt.utils.iteritems', 'iteritems', (['buff'], {}), '(buff)\n', (17877, 17883), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((21476, 21524), 'bzt.utils.iteritems', 'iteritems', (['data_buffer[-1][DataPoint.CUMULATIVE]'], {}), '(data_buffer[-1][DataPoint.CUMULATIVE])\n', (21485, 21524), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((5363, 5435), 'bzt.modules.blazemeter.project_finder.ProjectFinder', 'ProjectFinder', (['self.parameters', 'self.settings', 'self._user', 'wsp', 'self.log'], {}), '(self.parameters, self.settings, self._user, wsp, self.log)\n', (5376, 5435), False, 'from bzt.modules.blazemeter.project_finder import ProjectFinder\n'), ((5543, 5573), 'bzt.bza.Test', 'Test', (['self._user', "{'id': None}"], {}), "(self._user, {'id': 
None})\n", (5547, 5573), False, 'from bzt.bza import User, Session, Test\n'), ((6468, 6485), 'bzt.utils.open_browser', 'open_browser', (['url'], {}), '(url)\n', (6480, 6485), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((8830, 8855), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (8845, 8855), False, 'import os\n'), ((11091, 11121), 'bzt.utils.open_browser', 'open_browser', (['self.results_url'], {}), '(self.results_url)\n', (11103, 11121), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((11846, 11876), 'time.sleep', 'time.sleep', (['self.send_interval'], {}), '(self.send_interval)\n', (11856, 11876), False, 'import time\n'), ((13753, 13764), 'time.time', 'time.time', ([], {}), '()\n', (13762, 13764), False, 'import time\n'), ((18675, 18690), 'bzt.utils.iteritems', 'iteritems', (['item'], {}), '(item)\n', (18684, 18690), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((20104, 20115), 'time.time', 'time.time', ([], {}), '()\n', (20113, 20115), False, 'import time\n'), ((5243, 5342), 'bzt.TaurusNetworkError', 'TaurusNetworkError', (['"""Your account has no active workspaces, please contact BlazeMeter support"""'], {}), "(\n 'Your account has no active workspaces, please contact BlazeMeter support')\n", (5261, 5342), False, 'from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError\n'), ((8069, 8097), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (8081, 8097), False, 'import os\n'), ((8216, 8242), 'os.path.getsize', 'os.path.getsize', (['full_path'], {}), '(full_path)\n', (8231, 8242), False, 'import os\n'), ((8766, 8792), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (8782, 8792), False, 'import os\n'), ((11423, 11445), 'traceback.format_exc', 
'traceback.format_exc', ([], {}), '()\n', (11443, 11445), False, 'import traceback\n'), ((12557, 12579), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12577, 12579), False, 'import traceback\n'), ((17952, 17967), 'platform.node', 'platform.node', ([], {}), '()\n', (17965, 17967), False, 'import platform\n'), ((22005, 22041), 'bzt.utils.iteritems', 'iteritems', (['dpoint[DataPoint.CURRENT]'], {}), '(dpoint[DataPoint.CURRENT])\n', (22014, 22041), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n'), ((9921, 9941), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (9934, 9941), False, 'import os\n'), ((18128, 18141), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18139, 18141), False, 'from collections import defaultdict, OrderedDict\n'), ((22073, 22135), 'bzt.TaurusInternalException', 'TaurusInternalException', (['"""Cumulative KPISet is non-consistent"""'], {}), "('Cumulative KPISet is non-consistent')\n", (22096, 22135), False, 'from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError\n'), ((8348, 8396), 'os.path.relpath', 'os.path.relpath', (['root', 'self.engine.artifacts_dir'], {}), '(root, self.engine.artifacts_dir)\n', (8363, 8396), False, 'import os\n'), ((10250, 10257), 'bzt.utils.b', 'b', (['"""\n"""'], {}), "('\\n')\n", (10251, 10257), False, 'from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time\n')] |
wyli/nitorch | nitorch/nn/losses/_spatial.py | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | """
Losses that assume an underlying spatial organization
(gradients, curvature, etc.)
"""
import torch
import torch.nn as tnn
from nitorch.core.pyutils import make_list, prod
from nitorch.core.utils import slice_tensor
from nitorch.spatial import diff1d
from ._base import Loss
class LocalFeatures(tnn.Module):
"""Base class for feature extractors.
Is it really useful?
"""
def __init__(self, bound='dct2', voxel_size=1, *args, **kwargs):
"""
Parameters
----------
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
"""
super().__init__(*args, **kwargs)
self.bound = bound
self.voxel_size = voxel_size
class Diff(LocalFeatures):
"""Finite differences."""
def __init__(self, order=1, side='c', dim=None, *args, **kwargs):
"""
Parameters
----------
order : int, default=1
Finite differences order
side : {'c', 'f', 'b'} or list[{'c', 'f', 'b'}], default='c'
Type of finite-differencesto extract about each voxel:
* 'c' : central -> `g[i] = (x[i+1] - x[i-1])/2`
* 'f' : forward -> `g[i] = (x[i+1] - x[i])`
* 'b' : backward -> `g[i] = (x[i] - x[i-1])`
dim : int or list[int], optional
Dimensions along which to compute the finite differences.
By default, all except the first two (batch and channel).
bound : BoundType or list[BoundType], default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
reduction : {'mean', 'sum'} or callable, default='mean'
Type of reduction to apply.
"""
super().__init__(*args, **kwargs)
self.order = order
self.side = side
self.dim = dim
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor with shape (batch, channel, *spatial)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
g : tensor
Finite differences with shape
(batch, channel, *spatial, len(dim), len(side))
If `dim` or `side` are scalars, not lists, their respective
dimension is dropped in the output tensor.
E.g., if `side='c'`, the output shape is
(batch, channel, *spatial, len(dim))
"""
order = overload.get('order', self.order)
side = make_list(overload.get('side', self.side))
drop_side_dim = not isinstance(side, (tuple, list))
side = make_list(side)
dim = overload.get('dim', self.dim)
dim = list(range(2, x.dim())) if dim is None else dim
drop_dim_dim = not isinstance(dim, (tuple, list))
dim = make_list(dim)
nb_dim = len(dim)
voxel_size = overload.get('voxel_size', self.voxel_size)
voxel_size = make_list(voxel_size, nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
diffs = []
for d, vx, bnd in zip(dim, voxel_size, bound):
sides = []
for s in side:
grad = diff1d(x, order=order, dim=d, voxel_size=vx,
side=s, bound=bnd)
sides.append(grad)
sides = torch.stack(sides, dim=-1)
diffs.append(sides)
diffs = torch.stack(diffs, dim=-2)
if drop_dim_dim:
diffs = slice_tensor(diffs, 0, dim=-2)
if drop_side_dim:
diffs = slice_tensor(diffs, 0, dim=-1)
return diffs
class MembraneLoss(Loss):
"""Compute the membrane energy (squared gradients) of a tensor.
The membrane energy of a field is the integral of its squared
gradient magnitude (l2 norm). This class extends this concept to
other norms of the gradient (l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m)**2 * m**d = f**2 * m**(d-2)`.
In the l1 case, it is `f/m` in the absence of weighting and
`f * m**(d-1)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
#
# TODO: when penalty == 'l2', for some boundary conditions, there's no
# need to compute both forward and backward gradients as they are
# the same (but shifted). For now, to avoid having to detect which
# cases can be accelerated, I always compute both (more general).
loss = Diff(side=['f', 'b'], bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt() # TODO: use self.reduction instead of sum?
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class BendingLoss(Loss):
"""Compute the bending energy (squared gradients) of a tensor.
The bending energy of a field is the integral of its squared
second-order derivatives magnitude (l2 norm).
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m**2)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m**2)**2 * m**d = f**2 * m**(d-4)`.
In the l1 case, it is `f/m**2` in the absence of weighting and
`f * m**(d-2)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = Diff(order=2, side='c', bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt()
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class LameShearLoss(Loss):
"""Strain-part of the (Linear)-Elastic energy (penalty on shears).
= second Lame constant = shear modulus
The shear energy of a deformation field is the integral of the square
magnitude (l2 norm) of the symetric part diagonal terms of its Jacobian.
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, E = sum_{i != j} (dv[i]/dx[j]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
exclude_zooms=False, *args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
Here, `channel` map to elements of the Jacobian matrix, while
`side` map to the combination of sides (forward/backward)
used when extracting finite differences. Therefore, the
number of channels is dim*(dim+1)//2 and the number of sides
is 4.
exclude_zooms : bool, default=False
Do not include diagonal elements of the Jacobian in the
penalty (i.e., penalize only shears)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
self.exclude_zooms = exclude_zooms
def forward(self, x, **overload):
"""
Parameters
----------
x : (batch, ndim, *spatial) tensor
Input displacement tensor (in channel first order)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
exclude_zooms = overload.get('exclude_zooms', self.exclude_zooms)
# Compute spatial gradients
loss_diag = [] # diagonal elements of the Jacobian
loss_offdiag = [] # off-diagonal elements of hte (symmetric) Jacobian
for i in range(nb_dim):
# symmetric part
x_i = x[:, i:i+1, ...]
subloss_diag = []
subloss_offdiag = []
for j in range(nb_dim):
for side_i in ('f', 'b'):
diff = Diff(dim=[j+2], side=side_i, bound=bound,
voxel_size=voxel_size)
diff_ij = diff(x_i)
if i == j:
# diagonal elements
if not exclude_zooms:
subloss_diag.append(diff_ij)
else:
# off diagonal elements
x_j = x[:, j:j+1, ...]
for side_j in ('f', 'b'):
diff = Diff(dim=[i+2], side=side_j, bound=bound,
voxel_size=voxel_size)
diff_ji = diff(x_j)
subloss_offdiag.append((diff_ij + diff_ji)/2)
if not exclude_zooms:
loss_diag.append(torch.stack(subloss_diag, dim=-1))
loss_offdiag.append(torch.stack(subloss_offdiag, dim=-1))
if not exclude_zooms:
loss_diag = torch.cat(loss_diag, dim=1)
loss_offdiag = torch.cat(loss_offdiag, dim=1)
if l1 not in (None, False):
# Apply l1 reduction
if l1 is True:
if not exclude_zooms:
loss_diag = loss_diag.abs()
loss_offdiag = loss_offdiag.abs()
else:
l1 = make_list(l1)
if not exclude_zooms:
loss_diag = loss_diag.square().sum(dim=l1, keepdim=True).sqrt()
loss_offdiag = loss_offdiag.square().sum(dim=l1, keepdim=True).sqrt()
else:
# Apply l2 reduction
if not exclude_zooms:
loss_diag = loss_diag.square()
loss_offdiag = loss_offdiag.square()
# Mean reduction across sides
if not exclude_zooms:
loss_diag = loss_diag.mean(dim=-1)
loss_offdiag = loss_offdiag.mean(dim=-1)
# Weighted reduction across elements
if not exclude_zooms:
if loss_diag.shape[1] == 1:
# element dimension already reduced -> we need a small hack
loss = (loss_diag.square() + 2*loss_offdiag.square()) / (nb_dim**2)
loss = loss.sum(dim=1, keepdim=True).sqrt()
else:
# simple weighted average
loss = (loss_diag.sum(dim=1, keepdim=True) +
loss_offdiag.sum(dim=1, keepdim=True)*2) / (nb_dim**2)
else:
loss = loss_offdiag.sum(dim=1, keepdim=True)*2 / (nb_dim**2)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class LameZoomLoss(Loss):
"""Compression-part of the (Linear)-Elastic energy (penalty on volume change).
= first Lame constant
The compression energy of a deformation field is the integral of the square
magnitude (l2 norm) of the trace its Jacobian.
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, E = sum_{ij} (dv[i]/dx[j] + dv[j]/dx[i]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = []
for i in range(nb_dim):
x_i = x[:, i:i+1, ...]
diff = Diff(dim=[i], side=['f', 'b'], bound=bound,
voxel_size=voxel_size)
loss.append(diff(x_i))
loss = torch.cat(loss, dim=1)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1, keepdim=True).sqrt()
# Mean reduction across sides
loss = loss.mean(dim=-1)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
| [((2889, 2904), 'nitorch.core.pyutils.make_list', 'make_list', (['side'], {}), '(side)\n', (2898, 2904), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((3083, 3097), 'nitorch.core.pyutils.make_list', 'make_list', (['dim'], {}), '(dim)\n', (3092, 3097), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((3211, 3240), 'nitorch.core.pyutils.make_list', 'make_list', (['voxel_size', 'nb_dim'], {}), '(voxel_size, nb_dim)\n', (3220, 3240), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((3682, 3708), 'torch.stack', 'torch.stack', (['diffs'], {'dim': '(-2)'}), '(diffs, dim=-2)\n', (3693, 3708), False, 'import torch\n'), ((7441, 7453), 'nitorch.core.pyutils.prod', 'prod', (['factor'], {}), '(factor)\n', (7445, 7453), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((10749, 10761), 'nitorch.core.pyutils.prod', 'prod', (['factor'], {}), '(factor)\n', (10753, 10761), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((15463, 15493), 'torch.cat', 'torch.cat', (['loss_offdiag'], {'dim': '(1)'}), '(loss_offdiag, dim=1)\n', (15472, 15493), False, 'import torch\n'), ((17042, 17054), 'nitorch.core.pyutils.prod', 'prod', (['factor'], {}), '(factor)\n', (17046, 17054), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((19898, 19920), 'torch.cat', 'torch.cat', (['loss'], {'dim': '(1)'}), '(loss, dim=1)\n', (19907, 19920), False, 'import torch\n'), ((20343, 20355), 'nitorch.core.pyutils.prod', 'prod', (['factor'], {}), '(factor)\n', (20347, 20355), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((3607, 3633), 'torch.stack', 'torch.stack', (['sides'], {'dim': '(-1)'}), '(sides, dim=-1)\n', (3618, 3633), False, 'import torch\n'), ((3755, 3785), 'nitorch.core.utils.slice_tensor', 'slice_tensor', (['diffs', '(0)'], {'dim': '(-2)'}), '(diffs, 0, dim=-2)\n', (3767, 3785), False, 'from nitorch.core.utils import slice_tensor\n'), ((3832, 3862), 'nitorch.core.utils.slice_tensor', 'slice_tensor', 
(['diffs', '(0)'], {'dim': '(-1)'}), '(diffs, 0, dim=-1)\n', (3844, 3862), False, 'from nitorch.core.utils import slice_tensor\n'), ((15412, 15439), 'torch.cat', 'torch.cat', (['loss_diag'], {'dim': '(1)'}), '(loss_diag, dim=1)\n', (15421, 15439), False, 'import torch\n'), ((3458, 3521), 'nitorch.spatial.diff1d', 'diff1d', (['x'], {'order': 'order', 'dim': 'd', 'voxel_size': 'vx', 'side': 's', 'bound': 'bnd'}), '(x, order=order, dim=d, voxel_size=vx, side=s, bound=bnd)\n', (3464, 3521), False, 'from nitorch.spatial import diff1d\n'), ((7247, 7260), 'nitorch.core.pyutils.make_list', 'make_list', (['l1'], {}), '(l1)\n', (7256, 7260), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((10599, 10612), 'nitorch.core.pyutils.make_list', 'make_list', (['l1'], {}), '(l1)\n', (10608, 10612), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((15320, 15356), 'torch.stack', 'torch.stack', (['subloss_offdiag'], {'dim': '(-1)'}), '(subloss_offdiag, dim=-1)\n', (15331, 15356), False, 'import torch\n'), ((15766, 15779), 'nitorch.core.pyutils.make_list', 'make_list', (['l1'], {}), '(l1)\n', (15775, 15779), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((20107, 20120), 'nitorch.core.pyutils.make_list', 'make_list', (['l1'], {}), '(l1)\n', (20116, 20120), False, 'from nitorch.core.pyutils import make_list, prod\n'), ((15253, 15286), 'torch.stack', 'torch.stack', (['subloss_diag'], {'dim': '(-1)'}), '(subloss_diag, dim=-1)\n', (15264, 15286), False, 'import torch\n')] |
roberthtamayose/digitalmenu | items/models.py | 19c6633844934fd95f861674946da386411a19c9 | from django.db import models
from django.utils import timezone
class Categoria(models.Model):
nome = models.CharField(max_length=255)
def __str__(self):
return self.nome
class Item(models.Model):
nome = models.CharField(max_length=255)
data_criacao = models.DateTimeField(default=timezone.now)
descricao = models.TextField(blank=True)
categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
ocultar = models.BooleanField(default=False)
foto = models.ImageField(blank=True, upload_to='fotos/%y/%m/')
def __str__(self):
return self.nome
| [((107, 139), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (123, 139), False, 'from django.db import models\n'), ((228, 260), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (244, 260), False, 'from django.db import models\n'), ((280, 322), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (300, 322), False, 'from django.db import models\n'), ((339, 367), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (355, 367), False, 'from django.db import models\n'), ((384, 441), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Categoria'], {'on_delete': 'models.DO_NOTHING'}), '(Categoria, on_delete=models.DO_NOTHING)\n', (401, 441), False, 'from django.db import models\n'), ((456, 490), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (475, 490), False, 'from django.db import models\n'), ((502, 557), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""fotos/%y/%m/"""'}), "(blank=True, upload_to='fotos/%y/%m/')\n", (519, 557), False, 'from django.db import models\n')] |
zeroday0619/XenXenXenSe | app/services/__init__.py | 5af079e5edde3a6e4a1f5868052480d7b140d87c | from app.services.console import Console
from app.services.server import Server
__main__ = ["server", "console"]
| [] |
nuclearsandwich-ros/twist_mux-release | launch/twist_mux_launch.py | d92dcda0255e727b899d3bac62ef3d89c19cb38e | #!/usr/bin/env python3
# Copyright 2020 Gaitech Korea Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Brighten Lee
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
default_config_locks = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_locks.yaml')
default_config_topics = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_topics.yaml')
default_config_joystick = os.path.join(get_package_share_directory('twist_mux'),
'config', 'joystick.yaml')
return LaunchDescription([
DeclareLaunchArgument(
'config_locks',
default_value=default_config_locks,
description='Default locks config file'),
DeclareLaunchArgument(
'config_topics',
default_value=default_config_topics,
description='Default topics config file'),
DeclareLaunchArgument(
'config_joy',
default_value=default_config_joystick,
description='Default joystick config file'),
DeclareLaunchArgument(
'cmd_vel_out',
default_value='twist_mux/cmd_vel',
description='cmd vel output topic'),
Node(
package='twist_mux',
executable='twist_mux',
output='screen',
remappings={('/cmd_vel_out', LaunchConfiguration('cmd_vel_out'))},
parameters=[
LaunchConfiguration('config_locks'),
LaunchConfiguration('config_topics'),
LaunchConfiguration('config_joy')]
),
Node(
package='twist_mux',
executable='twist_marker',
output='screen',
remappings={('/twist', LaunchConfiguration('cmd_vel_out'))},
parameters=[{
'frame_id': 'base_link',
'scale': 1.0,
'vertical_position': 2.0}])
])
| [((965, 1005), 'ament_index_python.packages.get_package_share_directory', 'get_package_share_directory', (['"""twist_mux"""'], {}), "('twist_mux')\n", (992, 1005), False, 'from ament_index_python.packages import get_package_share_directory\n'), ((1122, 1162), 'ament_index_python.packages.get_package_share_directory', 'get_package_share_directory', (['"""twist_mux"""'], {}), "('twist_mux')\n", (1149, 1162), False, 'from ament_index_python.packages import get_package_share_directory\n'), ((1283, 1323), 'ament_index_python.packages.get_package_share_directory', 'get_package_share_directory', (['"""twist_mux"""'], {}), "('twist_mux')\n", (1310, 1323), False, 'from ament_index_python.packages import get_package_share_directory\n'), ((1435, 1553), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""config_locks"""'], {'default_value': 'default_config_locks', 'description': '"""Default locks config file"""'}), "('config_locks', default_value=default_config_locks,\n description='Default locks config file')\n", (1456, 1553), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1596, 1717), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""config_topics"""'], {'default_value': 'default_config_topics', 'description': '"""Default topics config file"""'}), "('config_topics', default_value=default_config_topics,\n description='Default topics config file')\n", (1617, 1717), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1760, 1882), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""config_joy"""'], {'default_value': 'default_config_joystick', 'description': '"""Default joystick config file"""'}), "('config_joy', default_value=default_config_joystick,\n description='Default joystick config file')\n", (1781, 1882), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1925, 2036), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""cmd_vel_out"""'], 
{'default_value': '"""twist_mux/cmd_vel"""', 'description': '"""cmd vel output topic"""'}), "('cmd_vel_out', default_value='twist_mux/cmd_vel',\n description='cmd vel output topic')\n", (1946, 2036), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2303, 2338), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""config_locks"""'], {}), "('config_locks')\n", (2322, 2338), False, 'from launch.substitutions import LaunchConfiguration\n'), ((2356, 2392), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""config_topics"""'], {}), "('config_topics')\n", (2375, 2392), False, 'from launch.substitutions import LaunchConfiguration\n'), ((2410, 2443), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""config_joy"""'], {}), "('config_joy')\n", (2429, 2443), False, 'from launch.substitutions import LaunchConfiguration\n'), ((2224, 2258), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""cmd_vel_out"""'], {}), "('cmd_vel_out')\n", (2243, 2258), False, 'from launch.substitutions import LaunchConfiguration\n'), ((2607, 2641), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""cmd_vel_out"""'], {}), "('cmd_vel_out')\n", (2626, 2641), False, 'from launch.substitutions import LaunchConfiguration\n')] |
psu-capstone-teamD/ElementalAuth | Tests/testLiveService.py | d896efad5a3e4cb453c324afc456aa82f82da239 | import sys
import unittest
import requests_mock
from mock import patch
sys.path.append('services/LiveService')
from LiveService import LiveService
L = LiveService()
baseURL = "https://yanexx65s8e1.live.elementalclouddev.com/api"
class LiveServiceTest(unittest.TestCase):
'''@patch('services.LiveService.LiveService.time', return_value=1502345833)
def testSetHeaders(self, mock_time):
headers = L.setHeaders("/schedules")
self.assertEqual(headers, {'X-Auth-Expires': '1502345863',
'X-Auth-Key': '9c9a72cd3a8feec48539f1943afbef8d',
'Content-type': 'application/xml',
'X-Auth-User': '',
'Accept': 'application/xml'})'''
@requests_mock.Mocker()
def testGetStatus(self, m):
m.get(baseURL + "/live_events/150/status", status_code=200)
resp = L.getLiveEventStatus(150)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvents(self, m):
m.get(baseURL + "/live_events", status_code=200)
m.get(baseURL + "/live_events?filter=running", status_code=200)
resp = L.getLiveEvents(None)
self.assertEqual(resp.status_code, 200)
resp = L.getLiveEvents("running")
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvent(self, m):
m.get(baseURL + "/live_events/164", status_code=200)
resp = L.getLiveEvent(164)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetSchedules(self, m):
m.get(baseURL + "/schedules", status_code=200)
resp = L.getSchedules()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfiles(self, m):
m.get(baseURL + "/live_event_profiles", status_code=200)
resp = L.getLiveProfiles()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfile(self, m):
m.get(baseURL + "/live_event_profiles/11", status_code=200)
resp = L.getLiveProfile(11)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testCreateLiveEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events", status_code=201)
resp = L.createEvent(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateProfile(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testUpdateEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_events/50", status_code=200)
resp = L.updateLiveEvent(50, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdatePlaylist(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events/92/playlist", status_code=200)
resp = L.updatePlaylist(92, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/schedules/13", status_code=200)
resp = L.updateSchedule(13, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateProfile(self, m):
with open('Tests/test_XML/live_profile.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.updateProfile(33, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveLiveEvent(self, m):
m.delete(baseURL + "/live_events/191", status_code=200)
resp = L.removeEvent(191)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveSchedule(self, m):
m.delete(baseURL + "/schedules/13", status_code=200)
resp = L.removeSchedule(13)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveProfile(self, m):
m.delete(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.removeProfile(33)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testStartEvent(self, m):
m.post(baseURL + "/live_events/50/start", status_code=200)
resp = L.startLiveEvent(50)
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| [((71, 110), 'sys.path.append', 'sys.path.append', (['"""services/LiveService"""'], {}), "('services/LiveService')\n", (86, 110), False, 'import sys\n'), ((154, 167), 'LiveService.LiveService', 'LiveService', ([], {}), '()\n', (165, 167), False, 'from LiveService import LiveService\n'), ((798, 820), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (818, 820), False, 'import requests_mock\n'), ((1016, 1038), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1036, 1038), False, 'import requests_mock\n'), ((1381, 1403), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1401, 1403), False, 'import requests_mock\n'), ((1585, 1607), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1605, 1607), False, 'import requests_mock\n'), ((1784, 1806), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (1804, 1806), False, 'import requests_mock\n'), ((1999, 2021), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (2019, 2021), False, 'import requests_mock\n'), ((2217, 2239), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (2237, 2239), False, 'import requests_mock\n'), ((2523, 2545), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (2543, 2545), False, 'import requests_mock\n'), ((2827, 2849), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (2847, 2849), False, 'import requests_mock\n'), ((3130, 3152), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (3150, 3152), False, 'import requests_mock\n'), ((3442, 3464), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (3462, 3464), False, 'import requests_mock\n'), ((3766, 3788), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (3786, 3788), False, 'import requests_mock\n'), ((4076, 4098), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (4096, 4098), False, 'import requests_mock\n'), ((4398, 4420), 
'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (4418, 4420), False, 'import requests_mock\n'), ((4611, 4633), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (4631, 4633), False, 'import requests_mock\n'), ((4822, 4844), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (4842, 4844), False, 'import requests_mock\n'), ((5041, 5063), 'requests_mock.Mocker', 'requests_mock.Mocker', ([], {}), '()\n', (5061, 5063), False, 'import requests_mock\n'), ((5282, 5297), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5295, 5297), False, 'import unittest\n')] |
LionelMassoulard/aikit | tests/models/test_stacking.py | 98b2abaa3bf47ab46f2fd3c270010293de06dba9 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:49:10 2018
@author: Lionel Massoulard
"""
import pytest
import numpy as np
import pandas as pd
from sklearn.base import is_regressor, is_classifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.dummy import DummyRegressor
from aikit.models.stacking import OutSamplerTransformer, StackerClassifier, StackerRegressor
def test_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(p1[:, 1] - p2[:, 0]).max() <= 10 ** (-10)
assert p2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestClassifier__1"]
y = np.array(["a", "b", "c"])[np.random.randint(0, 3, 100)]
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert p1.shape == (100, 3)
assert p2.shape == (100, 3)
assert np.abs(p1 - p2).max() <= 10 ** (-10)
assert model.get_feature_names() == [
"RandomForestClassifier__a",
"RandomForestClassifier__b",
"RandomForestClassifier__c",
]
def test_OutSampleTransformer_classifier_unbalanced():
np.random.seed(123)
X = np.random.randn(100, 2)
y = np.array(["AA"] * 33 + ["BB"] * 33 + ["CC"] * 33 + ["DD"])
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
p3 = model.fit_transform(X, y)
assert (p3.max(axis=1) > 0).all()
def test_OutSamplerTransformer_classifier_fit_transform():
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(n_estimators=10,random_state=123), cv=10)
model.fit(X, y)
y1 = model.model.predict(X)
y2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(y1 - y2[:, 0]).max() <= 10 ** (-10)
assert y2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestRegressor__target"]
def test_OutSamplerTransformer_regressor_fit_transform():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_approx_cross_validation_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
yhat1 = model.fit_transform(X, y)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = DummyRegressor()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict(X[test, :])
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_approx_cross_validation_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
with pytest.raises(NotFittedError):
model.model.predict(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
yhat1 = model.fit_transform(X, y)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = LogisticRegression()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict_proba(X[test, :])[:, 1]
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert is_regressor(stacker)
assert not is_classifier(stacker)
with pytest.raises(AttributeError):
stacker.predict_proba(X)
with pytest.raises(AttributeError):
stacker.classes_
def test_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert list(set(yhat)) == [0, 1]
assert list(stacker.classes_) == [0, 1]
yhat_proba = stacker.predict_proba(X)
assert yhat_proba.shape == (y.shape[0], 2)
assert not is_regressor(stacker)
assert is_classifier(stacker)
def test_approx_cross_validation_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict", scoring=["neg_mean_squared_error"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_neg_mean_squared_error" in cv_res
assert "train_neg_mean_squared_error" in cv_res
assert yhat.ndim == 1
assert yhat.shape[0] == y.shape[0]
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def test_approx_cross_validation_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(n_estimators=10,random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict_proba", scoring=["accuracy"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_accuracy" in cv_res
assert "train_accuracy" in cv_res
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 2)
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def _verif_all():
test_OutSamplerTransformer_classifier()
test_OutSamplerTransformer_regressor()
test_OutSamplerTransformer_classifier_fit_transform()
test_OutSamplerTransformer_regressor_fit_transform()
test_approx_cross_validation_OutSamplerTransformer_regressor()
test_approx_cross_validation_OutSamplerTransformer_classifier()
test_StackerClassifier()
test_StackerRegressor()
test_approx_cross_validation_StackerClassifier()
test_approx_cross_validation_StackerRegressor()
| [((644, 663), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (658, 663), True, 'import numpy as np\n'), ((672, 696), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (687, 696), True, 'import numpy as np\n'), ((1734, 1753), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1748, 1753), True, 'import numpy as np\n'), ((1762, 1785), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (1777, 1785), True, 'import numpy as np\n'), ((1794, 1852), 'numpy.array', 'np.array', (["(['AA'] * 33 + ['BB'] * 33 + ['CC'] * 33 + ['DD'])"], {}), "(['AA'] * 33 + ['BB'] * 33 + ['CC'] * 33 + ['DD'])\n", (1802, 1852), True, 'import numpy as np\n'), ((2092, 2116), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (2107, 2116), True, 'import numpy as np\n'), ((2166, 2216), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(123)'}), '(n_splits=10, shuffle=True, random_state=123)\n', (2171, 2216), False, 'from sklearn.model_selection import KFold\n'), ((2601, 2620), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2615, 2620), True, 'import numpy as np\n'), ((2629, 2653), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (2644, 2653), True, 'import numpy as np\n'), ((2662, 2682), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2677, 2682), True, 'import numpy as np\n'), ((3162, 3181), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3176, 3181), True, 'import numpy as np\n'), ((3190, 3214), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (3205, 3214), True, 'import numpy as np\n'), ((3223, 3243), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (3238, 3243), True, 'import numpy as np\n'), ((3254, 3304), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 
'(10)', 'shuffle': '(True)', 'random_state': '(123)'}), '(n_splits=10, shuffle=True, random_state=123)\n', (3259, 3304), False, 'from sklearn.model_selection import KFold\n'), ((3665, 3684), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (3679, 3684), True, 'import numpy as np\n'), ((3693, 3717), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (3708, 3717), True, 'import numpy as np\n'), ((3726, 3746), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (3741, 3746), True, 'import numpy as np\n'), ((4102, 4152), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(123)'}), '(n_splits=10, shuffle=True, random_state=123)\n', (4107, 4152), False, 'from sklearn.model_selection import KFold\n'), ((4526, 4551), 'numpy.zeros', 'np.zeros', (['(y.shape[0], 1)'], {}), '((y.shape[0], 1))\n', (4534, 4551), True, 'import numpy as np\n'), ((4925, 4944), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (4939, 4944), True, 'import numpy as np\n'), ((4953, 4977), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (4968, 4977), True, 'import numpy as np\n'), ((5445, 5495), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(123)'}), '(n_splits=10, shuffle=True, random_state=123)\n', (5450, 5495), False, 'from sklearn.model_selection import KFold\n'), ((5976, 6001), 'numpy.zeros', 'np.zeros', (['(y.shape[0], 1)'], {}), '((y.shape[0], 1))\n', (5984, 6001), True, 'import numpy as np\n'), ((6351, 6370), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (6365, 6370), True, 'import numpy as np\n'), ((6379, 6403), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (6394, 6403), True, 'import numpy as np\n'), ((6412, 6432), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (6427, 6432), True, 'import 
numpy as np\n'), ((6728, 6749), 'sklearn.base.is_regressor', 'is_regressor', (['stacker'], {}), '(stacker)\n', (6740, 6749), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((6965, 6984), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (6979, 6984), True, 'import numpy as np\n'), ((6993, 7017), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (7008, 7017), True, 'import numpy as np\n'), ((7596, 7618), 'sklearn.base.is_classifier', 'is_classifier', (['stacker'], {}), '(stacker)\n', (7609, 7618), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((7679, 7698), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (7693, 7698), True, 'import numpy as np\n'), ((7707, 7731), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (7722, 7731), True, 'import numpy as np\n'), ((7740, 7760), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (7755, 7760), True, 'import numpy as np\n'), ((8598, 8617), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (8612, 8617), True, 'import numpy as np\n'), ((8626, 8650), 'numpy.random.randn', 'np.random.randn', (['(100)', '(10)'], {}), '(100, 10)\n', (8641, 8650), True, 'import numpy as np\n'), ((771, 828), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (793, 828), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((933, 953), 'sklearn.base.is_classifier', 'is_classifier', (['model'], {}), '(model)\n', (946, 953), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((969, 988), 'sklearn.base.is_regressor', 'is_regressor', (['model'], {}), '(model)\n', (981, 988), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((1162, 1187), 'numpy.array', 'np.array', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1170, 1187), 
True, 'import numpy as np\n'), ((1253, 1310), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (1275, 1310), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1888, 1945), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (1910, 1945), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2252, 2293), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (2270, 2293), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((2384, 2425), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (2402, 2425), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((2718, 2774), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (2739, 2774), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2879, 2899), 'sklearn.base.is_classifier', 'is_classifier', (['model'], {}), '(model)\n', (2892, 2899), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((2915, 2934), 'sklearn.base.is_regressor', 'is_regressor', (['model'], {}), '(model)\n', (2927, 2934), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((3340, 3356), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (3354, 3356), False, 'from sklearn.dummy import DummyRegressor\n'), ((3448, 3464), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (3462, 3464), False, 'from sklearn.dummy import DummyRegressor\n'), ((3782, 3821), 'sklearn.ensemble.RandomForestRegressor', 
'RandomForestRegressor', ([], {'random_state': '(123)'}), '(random_state=123)\n', (3803, 3821), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((4034, 4063), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (4047, 4063), False, 'import pytest\n'), ((4188, 4204), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (4202, 4204), False, 'from sklearn.dummy import DummyRegressor\n'), ((4608, 4624), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (4622, 4624), False, 'from sklearn.dummy import DummyRegressor\n'), ((5052, 5092), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)'}), '(random_state=123)\n', (5074, 5092), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5305, 5334), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (5318, 5334), False, 'import pytest\n'), ((5373, 5402), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (5386, 5402), False, 'import pytest\n'), ((5530, 5571), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (5548, 5571), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((5652, 5693), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (5670, 5693), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((6058, 6078), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6076, 6078), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((6765, 6787), 'sklearn.base.is_classifier', 'is_classifier', (['stacker'], {}), '(stacker)\n', (6778, 6787), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((6798, 6827), 'pytest.raises', 'pytest.raises', 
(['AttributeError'], {}), '(AttributeError)\n', (6811, 6827), False, 'import pytest\n'), ((6872, 6901), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (6885, 6901), False, 'import pytest\n'), ((7563, 7584), 'sklearn.base.is_regressor', 'is_regressor', (['stacker'], {}), '(stacker)\n', (7575, 7584), False, 'from sklearn.base import is_regressor, is_classifier\n'), ((8380, 8409), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (8393, 8409), False, 'import pytest\n'), ((9325, 9354), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (9338, 9354), False, 'import pytest\n'), ((710, 730), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (725, 730), True, 'import numpy as np\n'), ((1188, 1216), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', '(100)'], {}), '(0, 3, 100)\n', (1205, 1216), True, 'import numpy as np\n'), ((2130, 2150), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2145, 2150), True, 'import numpy as np\n'), ((4991, 5011), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (5006, 5011), True, 'import numpy as np\n'), ((6571, 6594), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(123)'}), '(random_state=123)\n', (6576, 6594), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((7031, 7051), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (7046, 7051), True, 'import numpy as np\n'), ((7207, 7248), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (7225, 7248), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((7899, 7922), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(123)'}), '(random_state=123)\n', (7904, 7922), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((8481, 8510), 
'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (8494, 8510), False, 'import pytest\n'), ((8664, 8684), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (8679, 8684), True, 'import numpy as np\n'), ((8856, 8897), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (8874, 8897), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((9426, 9455), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (9439, 9455), False, 'import pytest\n'), ((1001, 1032), 'numpy.abs', 'np.abs', (['(p1[:, (1)] - p2[:, (0)])'], {}), '(p1[:, (1)] - p2[:, (0)])\n', (1007, 1032), True, 'import numpy as np\n'), ((1476, 1491), 'numpy.abs', 'np.abs', (['(p1 - p2)'], {}), '(p1 - p2)\n', (1482, 1491), True, 'import numpy as np\n'), ((2947, 2970), 'numpy.abs', 'np.abs', (['(y1 - y2[:, (0)])'], {}), '(y1 - y2[:, (0)])\n', (2953, 2970), True, 'import numpy as np\n'), ((6473, 6529), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (6494, 6529), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((6530, 6553), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(123)'}), '(random_state=123)\n', (6535, 6553), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((7107, 7147), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)'}), '(random_state=123)\n', (7129, 7147), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7149, 7190), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (7167, 7190), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((7801, 7857), 
'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (7822, 7857), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((7858, 7881), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'random_state': '(123)'}), '(random_state=123)\n', (7863, 7881), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((8740, 8797), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'random_state': '(123)'}), '(n_estimators=10, random_state=123)\n', (8762, 8797), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((8798, 8839), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'random_state': '(123)'}), '(C=1, random_state=123)\n', (8816, 8839), False, 'from sklearn.linear_model import LogisticRegression, Ridge\n'), ((2480, 2495), 'numpy.abs', 'np.abs', (['(y1 - y2)'], {}), '(y1 - y2)\n', (2486, 2495), True, 'import numpy as np\n'), ((3520, 3535), 'numpy.abs', 'np.abs', (['(y1 - y2)'], {}), '(y1 - y2)\n', (3526, 3535), True, 'import numpy as np\n')] |
odrolliv13/Hex-Photos | employee/views/check_rental.py | d1b42b63394783164f843fe6343491f04fe11e0c | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.conf import settings
import decimal, datetime
# This view will display all users and then on a new page display all the current rentals for a given user
def process_request(request):
    """Show the user-selection form, or the rental-detail branch for an id.

    Staff only: anyone not authenticated or not staff is redirected to /shop.
    With an empty URL parameter the user-picker form is rendered (and handled
    on POST); otherwise the parameter is treated as a rental id.
    """
    # Only authenticated staff members may check rentals.
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/shop')
    if not request.user.is_staff:
        return HttpResponseRedirect('/shop')

    if request.urlparams[0] == "":
        # No user picked yet: show (or process) the user-selection form.
        form = CheckRentalForm(initial={
            'user': "",
        })
        if request.method == 'POST':
            form = CheckRentalForm(request.POST)
            if form.is_valid():
                # Redirect to the page listing the chosen user's rentals.
                complete = "/employee/customer_rentals/" + str(form.cleaned_data['user'].id)
                return HttpResponseRedirect(complete)
        tvars = {
            'form': form,
        }
        return templater.render_to_response(request, 'return_rental.html', tvars)
    else:
        try:
            complete_rental = pmod.Rental.objects.get(id=request.urlparams[0])
            form = CheckRentalForm(initial={
                'user': "",
            })
        except:
            pass
        # NOTE(review): `form` is unconditionally overwritten below, so the
        # form built in the try-block is never used -- looks like leftover
        # placeholder code; behavior preserved as-is.
        form = "dfd"
        tvars = {
            'form': form,
        }
        return templater.render_to_response(request, 'return_rental.html', tvars)
class CheckRentalForm(forms.Form):
    """Dropdown of active users whose current rentals can be looked up."""
    user = forms.ModelChoiceField(
        queryset=pmod.User.objects.exclude(is_active=False),
        label="User",
        widget=forms.Select(attrs={'class': 'form-control'}),
    )
nguyenngtt/GSE---TEAM-A | jupyter/settings.py | 4f78c1ace051d4f2ff30a039aa481aa9b79d3242 | import pandas as pd
import numpy as np
import os
import logging

# suppress warnings (semicolons removed -- they are not idiomatic Python)
import warnings
warnings.filterwarnings('ignore')

from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()

# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30    # default 20
pd.options.display.max_rows = 200      # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200  # default 50; None = all

# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3)  # default 3
np.set_printoptions(suppress=True)  # no scientific notation for small numbers

# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import matplotlib as mpl
from matplotlib import pyplot as plt

# defaults: mpl.rcParamsDefault
rc_params = {'figure.figsize': (8, 4),
             'axes.labelsize': 'large',
             'axes.titlesize': 'large',
             'xtick.labelsize': 'large',
             'ytick.labelsize': 'large',
             'savefig.dpi': 100,
             'figure.dpi': 100}

# adjust matplotlib defaults
mpl.rcParams.update(rc_params)

import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
| [((103, 136), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (126, 136), False, 'import warnings\n'), ((252, 265), 'tqdm.autonotebook.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (263, 265), False, 'from tqdm.autonotebook import tqdm\n'), ((746, 780), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (765, 780), True, 'import numpy as np\n'), ((1429, 1459), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['rc_params'], {}), '(rc_params)\n', (1448, 1459), True, 'import matplotlib as mpl\n'), ((1483, 1508), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (1496, 1508), True, 'import seaborn as sns\n')] |
jeanbez/spack | var/spack/repos/builtin/packages/py-cyvcf2/package.py | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyCyvcf2(PythonPackage):
    """Fast VCF parsing with cython + htslib (Spack package for cyvcf2)."""
    homepage = "https://github.com/brentp/cyvcf2"
    pypi = "cyvcf2/cyvcf2-0.11.7.tar.gz"
    version('0.11.7', sha256='a4b6229b89a0a1043684c65cbdd702c366a8800dc3591fb44c4b5a08640cbeec')
    # Python itself plus build-time tooling.
    depends_on('python@2.7:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-cython@0.23.3:', type='build')
    # Python runtime dependencies.
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-coloredlogs', type=('build', 'run'))
    depends_on('py-click', type=('build', 'run'))
    # Non-Python dependency.
    depends_on('curl')
| [] |
mottaquikarim/pydev-psets | pset_functions/db_search/p1.py | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | """
GPA Calculator
"""
# Write a function called "simple_gpa" that finds the GPA when a student enters a letter grade as a string. Assign the result to a variable called "gpa".
"""
Use these conversions:
A+ --> 4.0
A --> 4.0
A- --> 3.7
B+ --> 3.3
B --> 3.0
B- --> 2.7
C+ --> 2.3
C --> 2.0
C- --> 1.7
D+ --> 1.3
D --> 1.0
D- --> 0.7
F --> 0.0
"""
| [] |
flying-sheep/SoundCard | test_soundcard.py | b476c8142b460fc8161d374b282fe846d72a0780 | import sys
import soundcard
import numpy
import pytest
# Stereo test signal, shaped (1024, 2): a constant +1.0 on channel 0 and
# a constant -1.0 on channel 1.
ones = numpy.ones(1024)
signal = numpy.column_stack((ones, -ones))
def test_speakers():
    """Every speaker enumerated by the backend exposes sane metadata."""
    for device in soundcard.all_speakers():
        assert isinstance(device.name, str)
        assert hasattr(device, 'id')
        assert isinstance(device.channels, int)
        assert device.channels > 0
def test_microphones():
    """Every microphone enumerated by the backend exposes sane metadata."""
    for device in soundcard.all_microphones():
        assert isinstance(device.name, str)
        assert hasattr(device, 'id')
        assert isinstance(device.channels, int)
        assert device.channels > 0
def test_default_playback():
    """Playing the stereo test signal on the default output must not raise."""
    speaker = soundcard.default_speaker()
    speaker.play(signal, 44100, channels=2)
def test_default_record():
    """Recording a fixed number of frames yields exactly that many frames."""
    recording = soundcard.default_microphone().record(1024, 44100)
    # BUG FIX: the original asserted `len(recording == 1024)`, which is the
    # length of an element-wise comparison array and therefore never fails.
    assert len(recording) == 1024
def test_default_blockless_record():
    """record() must also work without a fixed frame count (None frames)."""
    soundcard.default_microphone().record(None, 44100)
@pytest.fixture
def loopback_speaker():
    """Pick the platform's loopback output device (needs extra software)."""
    import sys
    if sys.platform == 'win32':
        # must install https://www.vb-audio.com/Cable/index.htm
        return soundcard.get_speaker('Cable')
    if sys.platform == 'darwin':
        # must install soundflower
        return soundcard.get_speaker('Soundflower64')
    if sys.platform == 'linux':
        # pacmd load-module module-null-sink channels=6 rate=48000
        return soundcard.get_speaker('Null')
    raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
    """Yield an open 48 kHz stereo player on the loopback speaker; it is
    closed again when the test using this fixture finishes."""
    with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
        yield player
@pytest.fixture
def loopback_microphone():
    """Pick the platform's loopback capture device (needs extra software)."""
    if sys.platform == 'win32':
        # must install https://www.vb-audio.com/Cable/index.htm
        return soundcard.get_microphone('Cable')
    if sys.platform == 'darwin':
        # must install soundflower
        return soundcard.get_microphone('Soundflower64')
    if sys.platform == 'linux':
        return soundcard.get_microphone('Null', include_loopback=True)
    raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
    """Yield an open 48 kHz stereo recorder on the loopback microphone; it is
    closed again when the test using this fixture finishes."""
    with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
        yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
    """Audio played on the loopback output reappears on the loopback input."""
    loopback_player.play(signal)
    captured = loopback_recorder.record(10 * 1024)
    assert captured.shape[1] == 2
    left, right = captured.T
    # Channel 0 carries +1, channel 1 carries -1 (see the module-level signal).
    assert left.mean() > 0
    assert right.mean() < 0
    assert (left > 0.5).sum() == len(signal)
    assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
    """A recorder channel map of [1, 0] swaps left and right on capture."""
    with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as recorder:
        loopback_player.play(signal)
        captured = recorder.record(12 * 1024)
    assert captured.shape[1] == 2
    left, right = captured.T
    # Swapped: the +1 channel now arrives on the right, the -1 on the left.
    assert right.mean() > 0
    assert left.mean() < 0
    assert (right > 0.5).sum() == len(signal)
    assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
    """A player channel map of [1, 0] swaps left and right on playback."""
    with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as player:
        player.play(signal)
        captured = loopback_recorder.record(12 * 1024)
    assert captured.shape[1] == 2
    left, right = captured.T
    # Swapped: the +1 channel now arrives on the right, the -1 on the left.
    assert right.mean() > 0
    assert left.mean() < 0
    assert (right > 0.5).sum() == len(signal)
    assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
    """Playing a mono signal mapped to channel 0 drives only the left channel."""
    with loopback_speaker.player(48000, channels=[0], blocksize=512) as player:
        player.play(signal[:, 0])
    # Record only after the player context has closed.
    captured = loopback_recorder.record(12 * 1024)
    assert captured.shape[1] == 2
    left, right = captured.T
    assert left.mean() > 0
    if sys.platform == 'linux':
        # unmapped channels on linux are filled with the mean of other channels
        assert right.mean() < left.mean()
    else:
        assert abs(right.mean()) < 0.01  # something like zero
    assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
    """Recording through a single-channel map yields a mono capture of channel 0."""
    with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as recorder:
        loopback_player.play(signal)
        captured = recorder.record(12 * 1024)
    assert len(captured.shape) == 1 or captured.shape[1] == 1
    assert captured.mean() > 0
    assert (captured > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
    """Matching channel maps of [2, 0] on both ends round-trip the signal."""
    with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as player, \
            loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as recorder:
        player.play(signal)
        captured = recorder.record(12 * 1024)
    assert len(captured.shape) == 2
    left, right = captured.T
    assert left.mean() > 0
    assert right.mean() < 0
    assert (left > 0.5).sum() == len(signal)
    assert (right < -0.5).sum() == len(signal)
| [((63, 79), 'numpy.ones', 'numpy.ones', (['(1024)'], {}), '(1024)\n', (73, 79), False, 'import numpy\n'), ((89, 125), 'numpy.concatenate', 'numpy.concatenate', (['[[ones], [-ones]]'], {}), '([[ones], [-ones]])\n', (106, 125), False, 'import numpy\n'), ((169, 193), 'soundcard.all_speakers', 'soundcard.all_speakers', ([], {}), '()\n', (191, 193), False, 'import soundcard\n'), ((410, 437), 'soundcard.all_microphones', 'soundcard.all_microphones', ([], {}), '()\n', (435, 437), False, 'import soundcard\n'), ((1114, 1144), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Cable"""'], {}), "('Cable')\n", (1135, 1144), False, 'import soundcard\n'), ((1806, 1839), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Cable"""'], {}), "('Cable')\n", (1830, 1839), False, 'import soundcard\n'), ((653, 680), 'soundcard.default_speaker', 'soundcard.default_speaker', ([], {}), '()\n', (678, 680), False, 'import soundcard\n'), ((757, 787), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (785, 787), False, 'import soundcard\n'), ((896, 926), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (924, 926), False, 'import soundcard\n'), ((1230, 1268), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (1251, 1268), False, 'import soundcard\n'), ((1925, 1966), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (1949, 1966), False, 'import soundcard\n'), ((1385, 1414), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Null"""'], {}), "('Null')\n", (1406, 1414), False, 'import soundcard\n'), ((2016, 2071), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Null"""'], {'include_loopback': '(True)'}), "('Null', include_loopback=True)\n", (2040, 2071), False, 'import soundcard\n')] |
jaiveergill/Last-Three-Digits-of-11-x | Last 3 digits of 11^x.py | def4519b9b46e41b4c4f2b3a5dbe5566316dd83e | # This is a simple program to find the last three digits of 11 raised to any given number.
# The main algorithm that does the work is on line 10
def trim_num(num):
if len(str(num)) > 3: # no need to trim if the number is 3 or less digits long
return str(num)[(len(str(num)) - 3):] # trims the number
return num
def main(exp):
init_val = str((((exp-1) * (exp))/2) % 10 + (exp % 100) / 10) + str(exp % 10) + "1" # The main algorithm which needs to be cleaned (only the last three digits should be shown)
return "{}".format(trim_num(init_val))
# To use it, simply copy the code and run the function
| [] |
ljb2208/osr-rover-code | osr_odometry/scripts/osr_odom_ackerman2.py | f4791d835cd760446777a226d37bb3114256affd | #!/usr/bin/env python
import time
from osr_msgs.msg import Joystick, Commands, Encoder, RunStop
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
import rospy
import tf
import math
import numpy
class Odometry2():
    """Wheel odometry: integrates encoder ticks into a planar pose (x, y, th)
    and publishes it as a nav_msgs/Odometry message on /odom, optionally also
    broadcasting the odom -> base frame transform over TF."""
    def __init__(self, baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=False):
        # baseFrame: child frame id used in the published odometry / TF.
        # wheelTrack: distance between the wheels (stored; unused below).
        # mpt: metres per encoder tick -- converts tick deltas to distance.
        # d4: geometry constant used when computing the turn radius.
        # maxTickPerSec: plausibility limit for encoder rates (see isValid).
        # pubTF: when True, also broadcast the odom transform over TF.
        self.encValid = False
        self.priorTime = rospy.Time.now()
        self.priorEncs = [0,0,0,0,0,0]
        self.mpt = mpt
        self.pubTF = pubTF
        # distance between wheels
        self.wheelTrack = wheelTrack
        self.d4 = d4
        self.baseFrame = baseFrame
        self.maxTickPerSec = maxTickPerSec
        # Integrated pose estimate.
        self.x = 0.0
        self.y = 0.0
        self.th = 0.0
        self.odomPub = rospy.Publisher("/odom", Odometry, queue_size = 1)
        if self.pubTF:
            self.odomBroadcaster = tf.TransformBroadcaster()
        # Fixed diagonal covariances used for the published twist and pose.
        self.twistCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
        self.poseCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
    def onEncoderMessage(self, message):
        """Encoder-topic callback: update and publish the odometry."""
        self.calculateOdometry(message)
    def isValid(self, message):
        """Reject encoder readings whose tick rate exceeds maxTickPerSec."""
        dencLeft = abs(message.rel_enc[1] - self.priorEncs[1])
        dencRight = abs(message.rel_enc[4] - self.priorEncs[4])
        dt = self.getElapsedTime(message.header.stamp)
        if (dencLeft/dt) > self.maxTickPerSec:
            rospy.logwarn("Invalid relative encoder value on left wheel. No odom calculated")
            return False
        if (dencRight/dt) > self.maxTickPerSec:
            rospy.logwarn("Invalid relative encoder value on right wheel. No odom calculated")
            return False
        return True
    def publishTransform(self, x, y, quaternion, timestamp):
        """Broadcast the odom -> base frame transform over TF."""
        self.odomBroadcaster.sendTransform(
            (x, y, 0),
            (quaternion.x, quaternion.y, quaternion.z, quaternion.w),
            timestamp,
            self.baseFrame,
            "odom")
    def publishOdomMessage(self, x, y, vx, vy, vth, quaternion, timestamp):
        """Fill and publish a nav_msgs/Odometry message on /odom."""
        odom = Odometry()
        odom.header.frame_id = "odom"
        odom.child_frame_id = self.baseFrame
        odom.header.stamp = timestamp
        odom.pose.pose.position.x = x
        odom.pose.pose.position.y = y
        odom.pose.pose.position.z = 0
        odom.pose.covariance = self.poseCovar
        odom.pose.pose.orientation = quaternion
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = vy
        odom.twist.twist.linear.z = 0
        odom.twist.twist.angular.z = vth
        odom.twist.covariance = self.twistCovar
        self.odomPub.publish(odom)
    def getElapsedTime(self, timestamp, save=False):
        """Seconds elapsed since the stored prior timestamp; optionally save
        `timestamp` as the new prior."""
        dt = (timestamp - self.priorTime).to_sec()
        if save:
            self.priorTime = timestamp
        return dt
    def calculateTurnRadius(self, dLeft, dRight):
        """Return (turn radius, heading change) for the two wheel distances.

        Equal (or zero) wheel distances are treated as straight-line motion:
        r = 0 and dTheta = 0."""
        dlr = dLeft - dRight
        # calculate radius of turn
        if dlr != 0 and dLeft != 0 and dRight != 0:
            lv = self.d4 + dLeft / dRight * self.d4
            # print ("lv: " + str(lv))
            r = lv / (1 - (dLeft / dRight))
        else:
            r = 0
        # Mean distance travelled by the two wheels.
        dist = (dLeft + dRight) / 2
        # calculate angle change
        if (r != 0):
            dTheta = dist / -r
        else:
            dTheta = 0
        return r, dTheta
    def calculateOdometry(self, message):
        """Integrate one encoder message into the pose estimate and publish."""
        currentTime = message.header.stamp
        encs = message.rel_enc
        # Drop implausible readings without updating any state.
        if not self.isValid(message):
            return
        dt = self.getElapsedTime(currentTime, save=True)
        # Distance travelled by each wheel since the previous message.
        dLeft = self.mpt * (encs[1] - self.priorEncs[1])
        dRight = self.mpt * (encs[4] - self.priorEncs[4])
        # dth = (dRight - dLeft) / self.wheelTrack
        radius, dTheta = self.calculateTurnRadius(dLeft, dRight)
        # calculate centre of turn circle
        xOrig = self.x + radius * math.cos(self.th)
        yOrig = self.y + radius * math.sin(self.th)
        # calculate new co-ordinates by rotating the pose about that centre
        xNew = xOrig + (self.x - xOrig) * math.cos(dTheta) - (self.y - yOrig) * math.sin(dTheta)
        yNew = yOrig + (self.x - xOrig) * math.sin(dTheta) + (self.y - yOrig) * math.cos(dTheta)
        # calculate change in x,y values
        dx = xNew - self.x
        dy = yNew - self.y
        self.th += dTheta
        # Keep the heading within +/- 2*pi.
        if (self.th > (math.pi * 2)):
            self.th -= (math.pi * 2)
        elif (self.th < (-math.pi * 2)):
            self.th += (math.pi * 2)
        self.x = xNew
        self.y = yNew
        # convert to ros co-ords
        xRos = self.y
        yRos = -self.x
        vxRos = dy / dt
        vyRos = -dx / dt
        vth = dTheta /dt
        quaternion = self.getQuaternion(self.th)
        if self.pubTF:
            self.publishTransform(xRos, yRos, quaternion, currentTime)
        self.publishOdomMessage(xRos, yRos, vxRos, vyRos, vth, quaternion, currentTime)
        self.priorEncs = encs
    def getQuaternion(self, th):
        """Return the yaw angle th (radians) as a geometry_msgs Quaternion."""
        quaternion = Quaternion()
        quaternion.x = 0.0
        quaternion.y = 0.0
        quaternion.z = math.sin(th / 2.0)
        quaternion.w = math.cos(th / 2.0)
        return quaternion
if __name__ == '__main__':
    rospy.init_node('osr_odometry2')
    rospy.loginfo("Starting the osr odometry2 node")
    # Read the odometry parameters (with defaults) from the parameter server.
    baseFrame = rospy.get_param("/odometry/base_frame_id", "base_link")
    # mpt = rospy.get_param("/odometry/mpt", 0.000026322)
    mpt = rospy.get_param("/odometry/mpt", 0.000100708)
    wheelTrack = rospy.get_param("/odometry/wheel_track", 0.455)
    d4 = rospy.get_param("/odometry/d4", 0.2559)
    maxTickPerSec = rospy.get_param("/odometry/maxTickPerSec", 8000)
    publishTF = rospy.get_param("~publishTF", False)
    # Odometry is recomputed from every /encoder message via the callback.
    odom = Odometry2(baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=publishTF)
    encSub = rospy.Subscriber("/encoder", Encoder, odom.onEncoderMessage)
    # Keep the node alive; all work happens in the subscriber callback.
    rate = rospy.Rate(20)
    while not rospy.is_shutdown():
        rate.sleep()
| [((5352, 5384), 'rospy.init_node', 'rospy.init_node', (['"""osr_odometry2"""'], {}), "('osr_odometry2')\n", (5367, 5384), False, 'import rospy\n'), ((5389, 5437), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting the osr odometry2 node"""'], {}), "('Starting the osr odometry2 node')\n", (5402, 5437), False, 'import rospy\n'), ((5456, 5511), 'rospy.get_param', 'rospy.get_param', (['"""/odometry/base_frame_id"""', '"""base_link"""'], {}), "('/odometry/base_frame_id', 'base_link')\n", (5471, 5511), False, 'import rospy\n'), ((5580, 5625), 'rospy.get_param', 'rospy.get_param', (['"""/odometry/mpt"""', '(0.000100708)'], {}), "('/odometry/mpt', 0.000100708)\n", (5595, 5625), False, 'import rospy\n'), ((5649, 5696), 'rospy.get_param', 'rospy.get_param', (['"""/odometry/wheel_track"""', '(0.455)'], {}), "('/odometry/wheel_track', 0.455)\n", (5664, 5696), False, 'import rospy\n'), ((5706, 5745), 'rospy.get_param', 'rospy.get_param', (['"""/odometry/d4"""', '(0.2559)'], {}), "('/odometry/d4', 0.2559)\n", (5721, 5745), False, 'import rospy\n'), ((5766, 5814), 'rospy.get_param', 'rospy.get_param', (['"""/odometry/maxTickPerSec"""', '(8000)'], {}), "('/odometry/maxTickPerSec', 8000)\n", (5781, 5814), False, 'import rospy\n'), ((5839, 5875), 'rospy.get_param', 'rospy.get_param', (['"""~publishTF"""', '(False)'], {}), "('~publishTF', False)\n", (5854, 5875), False, 'import rospy\n'), ((5976, 6036), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/encoder"""', 'Encoder', 'odom.onEncoderMessage'], {}), "('/encoder', Encoder, odom.onEncoderMessage)\n", (5992, 6036), False, 'import rospy\n'), ((6054, 6068), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (6064, 6068), False, 'import rospy\n'), ((407, 423), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (421, 423), False, 'import rospy\n'), ((781, 829), 'rospy.Publisher', 'rospy.Publisher', (['"""/odom"""', 'Odometry'], {'queue_size': '(1)'}), "('/odom', Odometry, queue_size=1)\n", (796, 829), False, 'import rospy\n'), 
((2130, 2140), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (2138, 2140), False, 'from nav_msgs.msg import Odometry\n'), ((5134, 5146), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (5144, 5146), False, 'from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3\n'), ((5226, 5244), 'math.sin', 'math.sin', (['(th / 2.0)'], {}), '(th / 2.0)\n', (5234, 5244), False, 'import math\n'), ((5268, 5286), 'math.cos', 'math.cos', (['(th / 2.0)'], {}), '(th / 2.0)\n', (5276, 5286), False, 'import math\n'), ((6084, 6103), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6101, 6103), False, 'import rospy\n'), ((903, 928), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (926, 928), False, 'import tf\n'), ((1455, 1541), 'rospy.logwarn', 'rospy.logwarn', (['"""Invalid relative encoder value on left wheel. No odom calculated"""'], {}), "(\n 'Invalid relative encoder value on left wheel. No odom calculated')\n", (1468, 1541), False, 'import rospy\n'), ((1631, 1718), 'rospy.logwarn', 'rospy.logwarn', (['"""Invalid relative encoder value on right wheel. No odom calculated"""'], {}), "(\n 'Invalid relative encoder value on right wheel. 
No odom calculated')\n", (1644, 1718), False, 'import rospy\n'), ((957, 1005), 'numpy.diag', 'numpy.diag', (['[0.001, 0.001, 0.001, 0.1, 0.1, 0.1]'], {}), '([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])\n', (967, 1005), False, 'import numpy\n'), ((1039, 1087), 'numpy.diag', 'numpy.diag', (['[0.001, 0.001, 0.001, 0.1, 0.1, 0.1]'], {}), '([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])\n', (1049, 1087), False, 'import numpy\n'), ((3991, 4008), 'math.cos', 'math.cos', (['self.th'], {}), '(self.th)\n', (3999, 4008), False, 'import math\n'), ((4043, 4060), 'math.sin', 'math.sin', (['self.th'], {}), '(self.th)\n', (4051, 4060), False, 'import math\n'), ((4183, 4199), 'math.sin', 'math.sin', (['dTheta'], {}), '(dTheta)\n', (4191, 4199), False, 'import math\n'), ((4280, 4296), 'math.cos', 'math.cos', (['dTheta'], {}), '(dTheta)\n', (4288, 4296), False, 'import math\n'), ((4145, 4161), 'math.cos', 'math.cos', (['dTheta'], {}), '(dTheta)\n', (4153, 4161), False, 'import math\n'), ((4242, 4258), 'math.sin', 'math.sin', (['dTheta'], {}), '(dTheta)\n', (4250, 4258), False, 'import math\n')] |
alisiahkoohi/importance-of-transfer-learning | src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py | bb4c7943f4ff64a2f1785503328b4cbb4f5111aa | import numpy as np
import h5py
import os
from devito.logger import info
from devito import TimeFunction, clear_cache
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import Model, RickerSource, Receiver, TimeAxis
from math import floor
from scipy.interpolate import griddata
import argparse
# Command-line options: where the raw Marmousi velocity model is read from
# and where the generated HDF5 training files are written.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_path', dest='data_path', type=str,
                    default='/home/ec2-user/data', help='raw data path')
parser.add_argument('--save_dir', dest='save_dir', type=str,
                    default='/home/ec2-user/data', help='saving directory')
args = parser.parse_args()
data_path = args.data_path
save_dir = args.save_dir

# Modelling constants.
origin = (0., 0.)
spacing = (7.5, 7.5)
tn = 1100.
nbpml = 40

# Define your vp in km/sec (x, z)
vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'),
                 dtype='float32', sep="")
vp = np.reshape(vp, (1601, 401))
# vp = vp[400:1401, 0:401]
shape = [401, 301]
# Flatten the velocity model into the (coordinates, values) arrays required
# by scipy's griddata.  Vectorized replacement for the original Python double
# loop over all ~640k grid cells: row-major (C) order, so flat index k maps
# to (indx, indy) = divmod(k, vp.shape[1]), exactly as before.
values = vp.ravel().astype(np.float64)
points = np.indices(vp.shape).reshape(2, -1).T.astype(np.float64)
# nx, ny = shape[0], shape[1]
# Build the target grid (x samples 1000-1287, z samples 120-232 of the
# original model indices), cubic-interpolate the scattered data onto it,
# and transpose back so vp is laid out (x, z) again.
X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1])))
int_vp = griddata(points, values, (X, Y), method='cubic')
int_vp = np.transpose(int_vp)
vp = int_vp
# create model
model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
t0 = 0.0
nt = int(1 + (tn-t0) / dt)  # Number of timesteps
time = np.linspace(t0, tn, nt)  # Discretized time axis
# One wavefield block is stored per source position (every 4th x cell) and
# per saved snapshot (every 20th step starting at step 100).
datasize0 = int(np.shape(range(0, shape[0], 4))[0])
datasize1 = int(np.shape(range(100, nt, 20))[0])
datasize = datasize0*datasize1
# Output HDF5 files: A holds the solver-good fields, B the solver-bad ones.
strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')
strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')
dataset_train = "train_dataset"
# 'w-' fails if the file already exists; datasets include the PML padding.
file_trainA = h5py.File(strTrainA, 'w-')
datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
file_trainB = h5py.File(strTrainB, 'w-')
datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
# Receiver line along the full domain width; rec_samp becomes the spacing
# between adjacent receivers.
num_rec = 601
rec_samp = np.linspace(0., model.domain_size[0], num=num_rec);
rec_samp = rec_samp[1]-rec_samp[0]
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# 25 Hz Ricker source; the initial position is overwritten in the shot loop.
src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1)
src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32)
rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
# Two solvers that differ only in spatial discretization order (2 vs 20).
solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
                              space_order=2, freesurface=False)
solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
                               space_order=20, freesurface=False)
# Wavefield buffers that keep the full time history (save=nt).
ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt)
ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt)
kk = 0
# For every 4th lateral source position, forward-model with both solvers and
# store the subsampled wavefield snapshots into the two HDF5 datasets.
for xsrc in range(0, shape[0], 4):
    clear_cache()
    ulocgood.data.fill(0.)
    ulocbad.data.fill(0.)
    # Move the source to the current lateral position (depth stays fixed).
    src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32)
    rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
    rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
    # Forward-model with both operators, saving the full time history.
    _, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True)
    _, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True)
    # Write every 20th snapshot (from step 100) for this shot.
    datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :])
    datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :])
    kk = kk + datasize1
file_trainA.close()
file_trainB.close()
| [((331, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (354, 370), False, 'import argparse\n'), ((877, 904), 'numpy.reshape', 'np.reshape', (['vp', '(1601, 401)'], {}), '(vp, (1601, 401))\n', (887, 904), True, 'import numpy as np\n'), ((959, 996), 'numpy.zeros', 'np.zeros', (['[vp.shape[0] * vp.shape[1]]'], {}), '([vp.shape[0] * vp.shape[1]])\n', (967, 996), True, 'import numpy as np\n'), ((1006, 1046), 'numpy.zeros', 'np.zeros', (['[vp.shape[0] * vp.shape[1], 2]'], {}), '([vp.shape[0] * vp.shape[1], 2])\n', (1014, 1046), True, 'import numpy as np\n'), ((1385, 1433), 'scipy.interpolate.griddata', 'griddata', (['points', 'values', '(X, Y)'], {'method': '"""cubic"""'}), "(points, values, (X, Y), method='cubic')\n", (1393, 1433), False, 'from scipy.interpolate import griddata\n'), ((1443, 1463), 'numpy.transpose', 'np.transpose', (['int_vp'], {}), '(int_vp)\n', (1455, 1463), True, 'import numpy as np\n'), ((1500, 1549), 'examples.seismic.Model', 'Model', (['origin', 'spacing', 'shape', '(2)', 'vp'], {'nbpml': 'nbpml'}), '(origin, spacing, shape, 2, vp, nbpml=nbpml)\n', (1505, 1549), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((1680, 1703), 'numpy.linspace', 'np.linspace', (['t0', 'tn', 'nt'], {}), '(t0, tn, nt)\n', (1691, 1703), True, 'import numpy as np\n'), ((1877, 1979), 'os.path.join', 'os.path.join', (['save_dir', '"""Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5"""'], {}), "(save_dir,\n 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')\n", (1889, 1979), False, 'import os\n'), ((1988, 2090), 'os.path.join', 'os.path.join', (['save_dir', '"""Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5"""'], {}), "(save_dir,\n 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')\n", (2000, 2090), False, 'import os\n'), ((2134, 2160), 'h5py.File', 'h5py.File', 
(['strTrainA', '"""w-"""'], {}), "(strTrainA, 'w-')\n", (2143, 2160), False, 'import h5py\n'), ((2277, 2303), 'h5py.File', 'h5py.File', (['strTrainB', '"""w-"""'], {}), "(strTrainB, 'w-')\n", (2286, 2303), False, 'import h5py\n'), ((2431, 2482), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (2442, 2482), True, 'import numpy as np\n'), ((2533, 2569), 'examples.seismic.TimeAxis', 'TimeAxis', ([], {'start': 't0', 'stop': 'tn', 'step': 'dt'}), '(start=t0, stop=tn, step=dt)\n', (2541, 2569), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2576, 2679), 'examples.seismic.RickerSource', 'RickerSource', ([], {'name': '"""src"""', 'grid': 'model.grid', 'f0': '(0.025)', 'time_range': 'time_range', 'space_order': '(1)', 'npoint': '(1)'}), "(name='src', grid=model.grid, f0=0.025, time_range=time_range,\n space_order=1, npoint=1)\n", (2588, 2679), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2770, 2846), 'examples.seismic.Receiver', 'Receiver', ([], {'name': '"""rec"""', 'grid': 'model.grid', 'time_range': 'time_range', 'npoint': 'num_rec'}), "(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)\n", (2778, 2846), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2876, 2927), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (2887, 2927), True, 'import numpy as np\n'), ((2998, 3112), 'examples.seismic.acoustic.AcousticWaveSolver', 'AcousticWaveSolver', (['model'], {'source': 'src', 'receiver': 'rec', 'kernel': '"""OT2"""', 'isic': '(True)', 'space_order': '(2)', 'freesurface': '(False)'}), "(model, source=src, receiver=rec, kernel='OT2', isic=True,\n space_order=2, freesurface=False)\n", (3016, 3112), False, 'from examples.seismic.acoustic import AcousticWaveSolver\n'), ((3130, 3245), 
'examples.seismic.acoustic.AcousticWaveSolver', 'AcousticWaveSolver', (['model'], {'source': 'src', 'receiver': 'rec', 'kernel': '"""OT2"""', 'isic': '(True)', 'space_order': '(20)', 'freesurface': '(False)'}), "(model, source=src, receiver=rec, kernel='OT2', isic=True,\n space_order=20, freesurface=False)\n", (3148, 3245), False, 'from examples.seismic.acoustic import AcousticWaveSolver\n'), ((3262, 3340), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""u"""', 'grid': 'model.grid', 'time_order': '(2)', 'space_order': '(20)', 'save': 'nt'}), "(name='u', grid=model.grid, time_order=2, space_order=20, save=nt)\n", (3274, 3340), False, 'from devito import TimeFunction, clear_cache\n'), ((3351, 3428), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""u"""', 'grid': 'model.grid', 'time_order': '(2)', 'space_order': '(2)', 'save': 'nt'}), "(name='u', grid=model.grid, time_order=2, space_order=2, save=nt)\n", (3363, 3428), False, 'from devito import TimeFunction, clear_cache\n'), ((792, 833), 'os.path.join', 'os.path.join', (['data_path', '"""vp_marmousi_bi"""'], {}), "(data_path, 'vp_marmousi_bi')\n", (804, 833), False, 'import os\n'), ((3478, 3491), 'devito.clear_cache', 'clear_cache', ([], {}), '()\n', (3489, 3491), False, 'from devito import TimeFunction, clear_cache\n'), ((3674, 3725), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (3685, 3725), True, 'import numpy as np\n'), ((1296, 1329), 'numpy.linspace', 'np.linspace', (['(1000)', '(1287)', 'shape[0]'], {}), '(1000, 1287, shape[0])\n', (1307, 1329), True, 'import numpy as np\n'), ((1341, 1372), 'numpy.linspace', 'np.linspace', (['(120)', '(232)', 'shape[1]'], {}), '(120, 232, shape[1])\n', (1352, 1372), True, 'import numpy as np\n'), ((2705, 2747), 'numpy.array', 'np.array', (['[1 * spacing[0], 2 * spacing[1]]'], {}), '([1 * spacing[0], 2 * spacing[1]])\n', (2713, 2747), True, 'import numpy as np\n'), ((3580, 
3625), 'numpy.array', 'np.array', (['[xsrc * spacing[0], 2 * spacing[1]]'], {}), '([xsrc * spacing[0], 2 * spacing[1]])\n', (3588, 3625), True, 'import numpy as np\n')] |
divine-coder/CODECHEF-PYTHON | facto.py | a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543 | import math
if __name__=='__main__':
n=(int)(input())
for abc in range(n):
t=(int)(input())
print math.factorial(t)
| [] |
johnmartingodo/pyKinematicsKineticsToolbox | setup.py | 4ffc99885f3c637b8c33914a4e50ccb4595fc844 | from setuptools import setup
setup(name="pykinematicskineticstoolbox",
version="0.0",
description="Installable python package which collects useful kinematics and kinetics functions",
author="John Martin K. Godø",
author_email="john.martin.kleven.godo@gmail.com",
license="MIT",
packages=["pykinematicskineticstoolbox"],
install_requires=["numpy"],
)
| [((30, 369), 'setuptools.setup', 'setup', ([], {'name': '"""pykinematicskineticstoolbox"""', 'version': '"""0.0"""', 'description': '"""Installable python package which collects useful kinematics and kinetics functions"""', 'author': '"""John Martin K. Godø"""', 'author_email': '"""john.martin.kleven.godo@gmail.com"""', 'license': '"""MIT"""', 'packages': "['pykinematicskineticstoolbox']", 'install_requires': "['numpy']"}), "(name='pykinematicskineticstoolbox', version='0.0', description=\n 'Installable python package which collects useful kinematics and kinetics functions'\n , author='John Martin K. Godø', author_email=\n 'john.martin.kleven.godo@gmail.com', license='MIT', packages=[\n 'pykinematicskineticstoolbox'], install_requires=['numpy'])\n", (35, 369), False, 'from setuptools import setup\n')] |
bit0fun/plugins | summary/summary_avail.py | 1f6f701bf1e60882b8fa61cb735e7033c8c29e3c | from datetime import datetime
# ensure an rpc peer is added
def addpeer(p, rpcpeer):
pid = rpcpeer['id']
if pid not in p.persist['peerstate']:
p.persist['peerstate'][pid] = {
'connected': rpcpeer['connected'],
'last_seen': datetime.now() if rpcpeer['connected'] else None,
'avail': 1.0 if rpcpeer['connected'] else 0.0
}
# exponetially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
p.persist['availcount'] += 1
leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
samples = leadwin / p.avail_interval
alpha = 1.0 / samples
beta = 1.0 - alpha
for rpcpeer in rpcpeers['peers']:
pid = rpcpeer['id']
addpeer(p, rpcpeer)
if rpcpeer['connected']:
p.persist['peerstate'][pid]['last_seen'] = datetime.now()
p.persist['peerstate'][pid]['connected'] = True
p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
else:
p.persist['peerstate'][pid]['connected'] = False
p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
| [((883, 897), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (895, 897), False, 'from datetime import datetime\n'), ((265, 279), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (277, 279), False, 'from datetime import datetime\n')] |
hugovk/python-terrascript | terrascript/dns/r.py | 08fe185904a70246822f5cfbdc9e64e9769ec494 | # terrascript/dns/r.py
import terrascript
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
| [] |
gneumann333/jumpscaleX_core | JumpscaleCore/clients/tcprouter/TCPRouterFactory.py | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | from Jumpscale import j
from .TCPRouterClient import TCPRouterClient
JSConfigs = j.baseclasses.object_config_collection
class TCPRouterFactory(JSConfigs):
__jslocation__ = "j.clients.tcp_router"
_CHILDCLASS = TCPRouterClient
def test(self):
"""
kosmos 'j.clients.tcp_router.test()'
"""
# get a client instance (TO CHECK: secret is already assigned to backend)
cl = self.get(
"test_instance",
local_ip="0.0.0.0",
local_port=18000,
remote_url="127.0.0.1",
remote_port=6379,
secret="test",
)
# connect to backend
cl.connect()
# stop connection
cl.stop()
print("TEST OK")
| [] |
miguelarbesu/nmrglue | nmrglue/fileio/spinsolve.py | 6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc | """
Functions for reading Magritek Spinsolve binary (dx/1d) files and
parameter (acqu.par/proc.par) files.
"""
import os
from warnings import warn
import numpy as np
from . import fileiobase
from . import jcampdx
__developer_info__ = """
Spinsolve is the software used on the Magritek benchtop NMR devices.
A spectrum is saved in a folder with several files. The spectral data is
stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed)
and 'spectrum_processed.1d' (FT + processed by spinsolve)
Optional spectral data (System->Prefs->Setup->Global data storage):
'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`),
'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsovle with ppm for each
point and intensity delimited by ';')
Other files:
'acqu.par' - all parameters that are used for acquisition
'Protocol.par' - text file used to reload data back into the Spinsolve software
'processing.script' - text file to transfer Spinsolve software protocol settings
into MNOVA
The Spinsolve Expert software has a slightly different output:
[Needs to be double checked as I do not have access to this software -LCageman]
- Output into JCAMP-DX is not possible
- 'spectrum_processed.1d' is not generated
- (new) 'fid.1d' - seems to be the same as 'data.1d'
- (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par'
- (new) .pt1 files - seem to be plot files specific for the expert software, cannot
be read by NMRglue
"""
def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"):
"""
Reads spinsolve files from a directory
When no spectrum filename is given (specfile), the following list is tried, in
that specific order
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
To use the resolution enhanced spectrum use the './Enhanced' folder as input.
Note that spectrum.1d and spectrum_processed.1d contain only data in the
frequency domain, so no Fourier transformation is needed. Also, use
dic["spectrum"]["xaxis"] to plot the x-axis
Parameters
----------
dir : str
Directory to read from
specfile : str, optional
Filename to import spectral data from. None uses standard filename from:
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
acqupar : str, optional
Filename for acquisition parameters. None uses standard name.
procpar : str, optional
Filename for processing parameters. None uses standard name.
Returns
-------
dic : dict
All parameters that can be present in the data folder:
dic["spectrum"] - First bytes of spectrum(_processed).1d
dic["acqu"] - Parameters present in acqu.par
dic["proc"] - Parameters present in proc.par
dic["dx"] - - Parameters present in the header of nmr_fid.dx
data : ndarray
Array of NMR data
"""
if os.path.isdir(dir) is not True:
raise IOError("directory %s does not exist" % (dir))
# Create empty dic
dic = {"spectrum": {}, "acqu": {}, "proc":{}, "dx":{}}
# Read in acqu.par and write to dic
acqupar = os.path.join(dir, acqupar)
if os.path.isfile(acqupar):
with open(acqupar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["acqu"][k.strip()] = v.strip()
# Read in proc.par and write to dic
procpar = os.path.join(dir,procpar)
if os.path.isfile(procpar):
with open(procpar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["proc"][k.strip()] = v.strip()
# Define which spectrumfile to take, using 'specfile' when defined, otherwise
# the files in 'priority_list' are tried, in that particular order
priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None]
if specfile:
inputfile = os.path.join(dir, specfile)
if not os.path.isfile(inputfile):
raise IOError("File %s does not exist" % (inputfile))
else:
for priority in priority_list:
if priority == None:
raise IOError("directory %s does not contain spectral data" % (dir))
inputfile = os.path.join(dir, priority)
if os.path.isfile(inputfile):
break
# Detect which file we are dealing with from the extension and read in the spectral data
# Reading .dx file using existing nmrglue.fileio.jcampdx module
if inputfile.split('.')[-1] == "dx":
dic["dx"], raw_data = jcampdx.read(inputfile)
data = np.empty((int(dic["dx"]["$TD"][0]), ), dtype='complex128')
data = raw_data[0][:] + 1j * raw_data[1][:]
# Reading .1d files
elif inputfile.split('.')[-1] == "1d":
with open(inputfile, "rb") as f:
raw_data = f.read()
# Write out parameters from the first 32 bytes into dic["spectrum"]
keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"]
for i, k in enumerate(keys):
start = i * 4
end = start + 4
value = int.from_bytes( raw_data[start:end], "little")
dic["spectrum"][k] = value
data = np.frombuffer(raw_data[end:], "<f")
# The first 1/3 of the file is xaxis data (s or ppm)
split = data.shape[-1] // 3
xscale = data[0 : split]
dic["spectrum"]["xaxis"] = xscale
# The rest is real and imaginary data points interleaved
data = data[split : : 2] + 1j * data[split + 1 : : 2]
else:
raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile))
return dic,data
def guess_udic(dic,data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
# Update defalt parameters, first acqu.par parameters in dic are tried, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
#keys left to default
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic
| [((3216, 3242), 'os.path.join', 'os.path.join', (['dir', 'acqupar'], {}), '(dir, acqupar)\n', (3228, 3242), False, 'import os\n'), ((3250, 3273), 'os.path.isfile', 'os.path.isfile', (['acqupar'], {}), '(acqupar)\n', (3264, 3273), False, 'import os\n'), ((3551, 3577), 'os.path.join', 'os.path.join', (['dir', 'procpar'], {}), '(dir, procpar)\n', (3563, 3577), False, 'import os\n'), ((3584, 3607), 'os.path.isfile', 'os.path.isfile', (['procpar'], {}), '(procpar)\n', (3598, 3607), False, 'import os\n'), ((2980, 2998), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (2993, 2998), False, 'import os\n'), ((4125, 4152), 'os.path.join', 'os.path.join', (['dir', 'specfile'], {}), '(dir, specfile)\n', (4137, 4152), False, 'import os\n'), ((6583, 6620), 'warnings.warn', 'warn', (['"""No data, cannot set udic size"""'], {}), "('No data, cannot set udic size')\n", (6587, 6620), False, 'from warnings import warn\n'), ((4168, 4193), 'os.path.isfile', 'os.path.isfile', (['inputfile'], {}), '(inputfile)\n', (4182, 4193), False, 'import os\n'), ((4452, 4479), 'os.path.join', 'os.path.join', (['dir', 'priority'], {}), '(dir, priority)\n', (4464, 4479), False, 'import os\n'), ((4495, 4520), 'os.path.isfile', 'os.path.isfile', (['inputfile'], {}), '(inputfile)\n', (4509, 4520), False, 'import os\n'), ((5466, 5501), 'numpy.frombuffer', 'np.frombuffer', (['raw_data[end:]', '"""<f"""'], {}), "(raw_data[end:], '<f')\n", (5479, 5501), True, 'import numpy as np\n'), ((7644, 7766), 'warnings.warn', 'warn', (['"""Cannot set observe frequency - set manually using: \'udic[0][\'obs\'] = x\' where x is magnetic field in MHz"""'], {}), '(\n "Cannot set observe frequency - set manually using: \'udic[0][\'obs\'] = x\' where x is magnetic field in MHz"\n )\n', (7648, 7766), False, 'from warnings import warn\n'), ((8622, 8663), 'warnings.warn', 'warn', (['"""Cannot set observed nucleus label"""'], {}), "('Cannot set observed nucleus label')\n", (8626, 8663), False, 'from warnings import 
warn\n'), ((7326, 7447), 'warnings.warn', 'warn', (['"""Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"""'], {}), '(\n "Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"\n )\n', (7330, 7447), False, 'from warnings import warn\n'), ((8242, 8368), 'warnings.warn', 'warn', (['"""Cannot set carrier - try: \'udic[0][\'car\'] = x * udic[0][\'obs\']\' where x is the center of the spectrum in ppm"""'], {}), '(\n "Cannot set carrier - try: \'udic[0][\'car\'] = x * udic[0][\'obs\']\' where x is the center of the spectrum in ppm"\n )\n', (8246, 8368), False, 'from warnings import warn\n'), ((7169, 7290), 'warnings.warn', 'warn', (['"""Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"""'], {}), '(\n "Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"\n )\n', (7173, 7290), False, 'from warnings import warn\n')] |
mielgosez/navigation_analytics | src/navigation_analytics/navigation_data.py | 3c382e8200afe4d37fa0880f155bf1bb2f48b83f | import logging
import copy
import pickle
import pandas as pd
class BaseClass:
def __init__(self,
input_data: pd.DataFrame,
logger: logging.Logger,
metadata: dict):
self.__input_data = input_data
self.__logger = logger
self.__metadata = metadata
@property
def logger(self):
return self.__logger
@property
def metadata(self):
return self.__metadata
@property
def input_data(self):
return self.__input_data
@input_data.setter
def input_data(self, new_input_data: pd.DataFrame):
self.__input_data = new_input_data
@property
def events_id(self):
return self.__metadata['metadata']['primary_keys']['events']
@property
def session_id(self):
return self.__metadata['metadata']['primary_keys']['sessions']
@property
def page_id(self):
return self.__metadata['metadata']['primary_keys']['pages']
@property
def group_id(self):
return self.metadata['metadata']['valid_values']['groups']['group_id']
@property
def valid_groups(self):
return self.metadata['metadata']['valid_values']['groups']['valid']
@property
def action_id(self):
return self.metadata['metadata']['valid_values']['actions']['action_id']
@property
def valid_actions(self):
return self.metadata['metadata']['valid_values']['actions']['valid']
@property
def search_action(self):
return self.metadata['metadata']['valid_values']['actions']['search_action']
@property
def visit_action(self):
return self.metadata['metadata']['valid_values']['actions']['visit_action']
@property
def timestamp_id(self):
return self.metadata['metadata']['datetime']
@property
def kpi_duration(self):
return self.metadata['metadata']['valid_values']['kpis']['duration_page']
@property
def kpi_position(self):
return self.metadata['metadata']['valid_values']['kpis']['result_position']
@property
def kpi_number_results(self):
return self.metadata['metadata']['valid_values']['kpis']['number_results']
class DataValidator(BaseClass):
def __init__(self,
logger: logging.Logger,
metadata: dict,
input_data: pd.DataFrame):
super().__init__(logger=logger,
metadata=metadata,
input_data=input_data)
self.default_pipeline()
# Pipelines
def default_pipeline(self):
self.check_events_are_unique()
self.check_groups_are_valid()
self.check_one_group_per_session()
# Validation Rules
def check_events_are_unique(self):
"""
Verifies that event identifier is primary key of input data.
:return: Validation
"""
number_rows = self.input_data.shape[0]
events_id = self.metadata['metadata']['primary_keys']['events']
number_events = len(self.input_data[events_id].unique())
if number_rows == number_events:
self.logger.info(f'Validation - Events are unique: {number_rows} rows and {number_events} events.')
else:
self.logger.error(f'Validation - Events are not unique: {number_rows} rows and {number_events} events.')
def check_groups_are_valid(self):
"""
Verifies that groups matches with those declared in metadata.
:return: Validation
"""
group_id = self.metadata['metadata']['valid_values']['groups']['group_id']
groups_in_data = list(self.input_data[group_id].unique())
group_valid_names = list(self.metadata['metadata']['valid_values']['groups']['valid'])
if set(groups_in_data) == set(group_valid_names):
self.logger.info(f'Validation - Groups are valid: {", ".join(group_valid_names)}.')
else:
self.logger.error(f'Validation - Group names are not valid: '
f'Names in data are {", ".join(groups_in_data)}. '
f'Names in metadata are {", ".join(group_valid_names)}.')
def check_one_group_per_session(self):
"""
Verifies that there's at most one group per session.
:return: Validation
"""
group_id = self.metadata['metadata']['valid_values']['groups']['group_id']
session_id = self.metadata['metadata']['primary_keys']['sessions']
max_num_groups = self.input_data.groupby(session_id)[group_id].apply(lambda x: len(set(x))).max()
if max_num_groups == 1:
self.logger.info(f'Validation - Just one group per session.')
else:
self.logger.error(f'Validation - Groups per session is different to one. '
f'Maximum number of groups per session detected in data set is: {max_num_groups}')
class SessionAnalyzer(BaseClass):
def __init__(self,
input_data: pd.DataFrame,
metadata: dict,
logger: logging.Logger):
super().__init__(logger=logger,
metadata=metadata,
input_data=input_data)
self.__results = dict()
self.__session_data = self.create_session_look_up()
self.__page_data = self.create_page_look_up()
self.__page_data_out = self.create_page_look_up_out()
self.__search_table = self.create_search_table()
self.__duration_table = self.create_duration_table()
def filter_session_by_group(self, group_id: str):
"""
Filter session by group id provided in the input. This is expected to be a recurrent operation.
:param group_id:
:return:
"""
if group_id not in self.valid_groups:
self.logger.error(f'{group_id} is not a valid group.')
return self.session_data.loc[self.session_data[self.group_id] == group_id, :]
# Metrics
def compute_click_through_rate(self, group_id: str = None):
"""
This function computes the click through rate, understanding this quantity as the ratio of searches ending up in
a session landing in a page. Session Attribute.
:param group_id:
:return:
"""
result = None
if group_id is None:
key = 'click_through_rate'
sub_key = 'all'
# Merging sessions with page ids
df = copy.deepcopy(self.session_data.merge(self.page_data, on=self.session_id, how='left'))
# Computing boolean vector: True means session has a visit, False otherwise.
result = df.groupby(by=self.session_id)[self.action_id].apply(lambda x: self.visit_action in set(x))
else:
key = 'click_through_rate'
sub_key = group_id
if group_id in self.valid_groups:
# Filtering sessions by required group.
filtered_sessions = self.filter_session_by_group(group_id=group_id)
df = copy.deepcopy(filtered_sessions.merge(self.page_data, on=self.session_id, how='left'))
result = df.groupby(by='session_id').action.apply(lambda x: 'visitPage' in set(x))
else:
self.logger.error(f'{group_id} is not a valid group.')
# Computing ctr
ctr = sum(result) / len(result)
self.logger.info(f'Click Through Rate is equal to: {ctr}')
# Storing results
update_result = self.kpi_results
try:
update_result[key][key].append(ctr)
update_result[key]['group'].append(sub_key)
except KeyError:
update_result[key] = dict()
update_result[key][key] = [ctr]
update_result[key]['group'] = [sub_key]
self.kpi_results = update_result
return ctr
def compute_search_frequency(self,
group_id: str = None,
number_ranking: int = 10):
"""
Get the most common first result per session. This is a Session Attribute.
:param number_ranking: Number of results to visualize.
:param group_id:
:return:
"""
if group_id is None:
key = 'search_frequency'
sub_key = 'all'
df_sessions = self.session_data.copy()
else:
key = 'search_frequency'
sub_key = group_id
df_sessions = self.filter_session_by_group(group_id=group_id)
df = df_sessions.merge(self.page_data, on=self.session_id, how='left')
# Merge with duration table to retrieve datestamp data.
df_all = df.merge(self.duration_table, on=self.page_id, how='left')
df_all.dropna(inplace=True)
# Most common first result
df_all = df_all.groupby('session_id').apply(lambda x:
x.loc[x[self.timestamp_id] == min(x[self.timestamp_id]),
[self.kpi_position, self.timestamp_id]])
# Result
result = df_all[self.kpi_position].value_counts(normalize=True)[:number_ranking]
self.logger.info(f'Most common result is {result.index[0]}')
# Store result
updated_results = self.kpi_results
try:
updated_results[key][key].extend(list(result.values))
updated_results[key]['position'].extend(list(result.index))
updated_results[key]['group'].extend([sub_key]*len(result.index))
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = list(result.values)
updated_results[key]['position'] = list(result.index)
updated_results[key]['group'] = [sub_key]*len(result.index)
self.kpi_results = updated_results
return result
def compute_zero_result_rate(self,
group_id: str = None):
"""
Computes the proportion of searches that end up in no results.
:param group_id:
:return:
"""
df = self.search_table.copy()
# Compute number of searches resulting in found elements.
df['success'] = [True if item == 0 else False for item in df[self.kpi_number_results]]
if group_id is None:
key = 'zero_result_rate'
sub_key = 'all'
result = df['success']
else:
key = 'zero_result_rate'
sub_key = group_id
df_sessions = self.filter_session_by_group(group_id=group_id)
df_pages = df_sessions.merge(self.page_data, on=self.session_id, how='left')
df = df.merge(df_pages, on=self.page_id, how='left')
df.dropna(inplace=True)
result = df['success']
# Computing result
value = sum(result) / len(result)
self.logger.info(f'Zero result rate is: {value}')
# Storing result.
updated_results = self.kpi_results
try:
updated_results[key][key].append(value)
updated_results[key]['group'].append(sub_key)
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = [value]
updated_results[key]['group'] = [sub_key]
self.kpi_results = updated_results
return value
def compute_session_length(self,
group_id: str = None):
"""
Compute session's length
:param group_id:
:return:
"""
if group_id is None:
key = 'session_length'
sub_key = 'all'
df = self.input_data
else:
key = 'session_length'
sub_key = group_id
df = self.filter_session_by_group(group_id=group_id)
df = df.merge(self.input_data, on=self.session_id, how='left')
# Compute results
value = df.groupby(self.session_id)[self.timestamp_id].apply(lambda x: (max(x) - min(x)).total_seconds())
time_value = df.groupby(self.session_id)[self.timestamp_id].min()
# Store results
updated_results = self.kpi_results
try:
updated_results[key][key].extend(list(value.values))
updated_results[key]['session_date'].extend(list(time_value.values))
updated_results[key]['session_id'].extend(list(value.index))
updated_results[key]['group'].extend([sub_key]*len(value.index))
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = list(value.values)
updated_results[key]['session_date'] = list(time_value.values)
updated_results[key]['session_id'] = list(value.index)
updated_results[key]['group'] = [sub_key]*len(value.index)
self.kpi_results = updated_results
return value
# Instantiation
def update_data(self):
self.page_data = self.create_page_look_up()
self.page_data_out = self.create_page_look_up_out()
self.session_data = self.create_session_look_up()
self.duration_table = self.create_duration_table()
self.search_table = self.create_search_table()
def create_session_look_up(self):
return self.input_data[[self.session_id, self.group_id]].drop_duplicates()
def create_page_look_up_out(self):
return self.input_data[[self.session_id, self.page_id]].drop_duplicates()
def create_page_look_up(self):
return self.input_data[[self.session_id, self.page_id, self.action_id]].drop_duplicates()
def create_search_table(self):
"""
Preserves just search results from original dataset.
:return: Information relevant only to searches
"""
local_df = self.input_data.copy()
local_df = local_df.loc[local_df[self.action_id] == self.search_action,
[self.events_id, self.timestamp_id, self.page_id, self.kpi_number_results]]
return local_df
def create_duration_table(self):
"""
Preserves just search results from original dataset.
:return: Information relevant only to searches
"""
local_df = self.input_data.copy()
local_df = local_df.loc[local_df[self.action_id] != self.search_action,
[self.timestamp_id,
self.page_id,
self.kpi_position,
self.kpi_duration]]
# Remove redundant information on position and duration
local_df = local_df.groupby(self.page_id).max()
no_duration_info = local_df[self.kpi_duration].isna()
no_position_info = local_df[self.kpi_position].isna()
self.logger.warning(f'{no_position_info.sum()} NA values for {self.kpi_position}.')
self.logger.warning(f'{no_duration_info.sum()} NA values for {self.kpi_duration}.')
# Remove those observations where position of results do not exist while there is duration
no_position_but_duration = [(2 * item[1] - item[0]) != 2 for item in zip(no_duration_info, no_position_info)]
position_but_duration = [(2 * item[1] - item[0]) == 2 for item in zip(no_duration_info, no_position_info)]
kpi_results = self.kpi_results
kpi_results['invalid_results'] = local_df.loc[position_but_duration, :].copy()
self.kpi_results = kpi_results
self.logger.warning(f'{sum([not item for item in no_position_but_duration])} '
f'NA values for position with duration.')
local_df = local_df.loc[no_position_but_duration, :]
# The rest of cases fill 0
local_df.fillna(0, inplace=True)
local_df.reset_index(inplace=True)
local_df.sort_values(by=[self.timestamp_id, self.page_id], inplace=True)
return local_df
# Getters and setters
@property
def session_data(self):
    """Per-session table built by the analysis (pd.DataFrame)."""
    return self.__session_data

@session_data.setter
def session_data(self, new_session_data: pd.DataFrame):
    self.__session_data = new_session_data

@property
def page_data(self):
    """Per-page table built by the analysis (pd.DataFrame)."""
    return self.__page_data

@page_data.setter
def page_data(self, new_page_data: pd.DataFrame):
    self.__page_data = new_page_data

@property
def page_data_out(self):
    # Presumably the export-ready variant of page_data — confirm with producer.
    return self.__page_data_out

@page_data_out.setter
def page_data_out(self, new_page_data_out: pd.DataFrame):
    self.__page_data_out = new_page_data_out

@property
def number_sessions(self):
    """Number of rows in session_data, i.e. the number of sessions."""
    return self.session_data.shape[0]

@property
def number_pages(self):
    """Number of rows in page_data, i.e. the number of pages."""
    return self.page_data.shape[0]
@property
def duration_table(self):
    """Per-page duration table (see create_duration_table)."""
    return self.__duration_table

@duration_table.setter
def duration_table(self, new_duration_table: pd.DataFrame):
    self.__duration_table = new_duration_table

@property
def search_table(self):
    """Table holding only the search events (pd.DataFrame)."""
    return self.__search_table

@search_table.setter
def search_table(self, new_search_table: pd.DataFrame):
    self.__search_table = new_search_table

@property
def kpi_results(self):
    """Dict of computed KPI result tables, keyed by KPI name."""
    return self.__results

@kpi_results.setter
def kpi_results(self, results: dict):
    self.__results = results
class NavigationDataAnalyzer:
    """Facade tying together validation and session analysis of navigation data.

    Holds the raw event data and its metadata description, wires up a
    DataValidator and a SessionAnalyzer, and offers helpers to count events,
    persist the analyzer state with pickle and export all computed tables to
    a single Excel workbook.
    """

    def __init__(self,
                 input_data: pd.DataFrame,
                 metadata: dict,
                 logger_level: int = logging.WARNING):
        """
        :param input_data: raw navigation events, one row per event
        :param metadata: description of the dataset (column ids, valid values)
        :param logger_level: verbosity for the internal logger
        """
        self.__logger = logging.Logger(name='default_logger',
                                      level=logger_level)
        self.__input_data = input_data
        self.__metadata = metadata
        self.__data_validator = DataValidator(input_data=input_data,
                                             metadata=metadata,
                                             logger=self.logger)
        self.__session_analyzer = SessionAnalyzer(input_data=input_data,
                                                metadata=metadata,
                                                logger=self.logger)

    def get_number_events(self,
                          group_name: str = None):
        """
        Retrieve the number of events in the dataset, optionally filtered by
        study group.  Assumes events are the primary key of the dataset.

        :param group_name: group name as defined in
            metadata['metadata']['valid_values']['groups']['valid'];
            None (default) counts all events
        :return: number of events, or None when group_name is unknown
            (an error is logged in that case)
        """
        groups_id = self.metadata['metadata']['valid_values']['groups']['group_id']
        valid_groups = self.metadata['metadata']['valid_values']['groups']['valid']
        if group_name is None:
            return self.input_data.shape[0]
        if group_name in valid_groups:
            return self.input_data.loc[self.input_data[groups_id] == group_name].shape[0]
        # Unknown group: report and fall through (implicitly returns None).
        self.logger.error(f'{group_name} is not a valid group name. '
                          f'Please select among those listed here: {", ".join(valid_groups)}')

    def save(self, name: str = 'navigation_data_analyzer.pickle'):
        """Persist metadata, input data and KPI results to *name* via pickle."""
        objects_to_store = {
            'metadata': self.metadata,
            'input_data': self.input_data,
            'kpi_results': self.session_analyzer.kpi_results,
        }
        with open(name, 'wb') as fp:
            pickle.dump(objects_to_store, fp)

    @staticmethod
    def load(filepath: str):
        """Recreate a NavigationDataAnalyzer from a file written by save().

        :param filepath: path to the pickle file
        :return: reconstructed NavigationDataAnalyzer instance
        """
        with open(filepath, 'rb') as fp:
            existing_object = pickle.load(fp)
        instance_object = NavigationDataAnalyzer(input_data=existing_object['input_data'],
                                                 metadata=existing_object['metadata'])
        instance_object.session_analyzer.kpi_results = existing_object['kpi_results']
        return instance_object

    def to_excel(self, filename: str):
        """Export every computed table to one Excel workbook, one sheet each."""
        excel_writer = pd.ExcelWriter(filename)
        self.session_analyzer.session_data.to_excel(excel_writer, sheet_name='session_data', index=False)
        self.session_analyzer.page_data_out.to_excel(excel_writer, sheet_name='page_data', index=False)
        self.session_analyzer.duration_table.to_excel(excel_writer, sheet_name='duration_table', index=False)
        self.session_analyzer.search_table.to_excel(excel_writer, sheet_name='search_table', index=False)
        for key, value in self.session_analyzer.kpi_results.items():
            results = pd.DataFrame(value)
            results.to_excel(excel_writer, sheet_name=f'kpi_{key}', index=False)
        groups_df = pd.DataFrame({'group': self.session_analyzer.valid_groups})
        groups_df.to_excel(excel_writer, sheet_name='groups', index=False)
        excel_writer.save()
        excel_writer.close()

    # Getters and Setters
    @property
    def session_analyzer(self):
        """The SessionAnalyzer built from the current data (read-only)."""
        return self.__session_analyzer

    @property
    def data_validator(self):
        """The DataValidator built from the current data (read-only)."""
        return self.__data_validator

    @property
    def input_data(self):
        """Raw navigation events (pd.DataFrame)."""
        return self.__input_data

    @input_data.setter
    def input_data(self, new_input_data: pd.DataFrame):
        # Re-validate before accepting the new dataset.
        self.data_validator.input_data = new_input_data
        self.data_validator.default_pipeline()
        self.__input_data = new_input_data

    @property
    def metadata(self):
        """Dataset description dict."""
        return self.__metadata

    @metadata.setter
    def metadata(self, new_metadata: dict):
        # BUG FIX: previously assigned to self.__input_data, silently
        # clobbering the dataset instead of updating the metadata.
        self.__metadata = new_metadata

    @property
    def logger(self):
        """Logger shared with the validator and analyzer."""
        return self.__logger

    @logger.setter
    def logger(self, new_logger):
        self.__logger = new_logger
| [((17565, 17622), 'logging.Logger', 'logging.Logger', ([], {'name': '"""default_logger"""', 'level': 'logger_level'}), "(name='default_logger', level=logger_level)\n", (17579, 17622), False, 'import logging\n'), ((20100, 20124), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {}), '(filename)\n', (20114, 20124), True, 'import pandas as pd\n'), ((20763, 20822), 'pandas.DataFrame', 'pd.DataFrame', (["{'group': self.session_analyzer.valid_groups}"], {}), "({'group': self.session_analyzer.valid_groups})\n", (20775, 20822), True, 'import pandas as pd\n'), ((19561, 19594), 'pickle.dump', 'pickle.dump', (['objects_to_store', 'fp'], {}), '(objects_to_store, fp)\n', (19572, 19594), False, 'import pickle\n'), ((19714, 19729), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (19725, 19729), False, 'import pickle\n'), ((20642, 20661), 'pandas.DataFrame', 'pd.DataFrame', (['value'], {}), '(value)\n', (20654, 20661), True, 'import pandas as pd\n')] |
CalColson/openskill.py | openskill/statistics.py | ab61ca57fa6e60140d0a292c73440f22ceabd9a2 | import sys
import scipy.stats
normal = scipy.stats.norm(0, 1)
def phi_major(x):
return normal.cdf(x)
def phi_minor(x):
return normal.pdf(x)
def v(x, t):
xt = x - t
denom = phi_major(xt)
return -xt if (denom < sys.float_info.epsilon) else phi_minor(xt) / denom
def w(x, t):
xt = x - t
denom = phi_major(xt)
if denom < sys.float_info.epsilon:
return 1 if (x < 0) else 0
return v(x, t) * (v(x, t) + xt)
def vt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < 1e-5:
if x < 0:
return -x - t
return -x + t
a = phi_minor(-t - xx) - phi_minor(t - xx)
return (-a if x < 0 else a) / b
def wt(x, t):
xx = abs(x)
b = phi_major(t - xx) - phi_major(-t - xx)
if b < sys.float_info.epsilon:
return 1.0
return ((t - xx) * phi_minor(t - xx) + (t + xx) * phi_minor(-t - xx)) / b + vt(
x, t
) * vt(x, t)
| [] |
revesansparole/oacontainer | src/openalea/container/graph.py | 066a15b8b1b22f857bf25ed443c5f39f4cbefb3e | # -*- coding: utf-8 -*-
#
# Graph : graph package
#
# Copyright or Copr. 2006 INRIA - CIRAD - INRA
#
# File author(s): Jerome Chopard <jerome.chopard@sophia.inria.fr>
#
# Distributed under the Cecill-C License.
# See accompanying file LICENSE.txt or copy at
# http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html
#
# VPlants WebSite : https://gforge.inria.fr/projects/vplants/
#
"""This module provide a simple pure python implementation
for a graph interface
does not implement copy concept
"""
from id_dict import IdDict
class GraphError(Exception):
"""
base class of all graph exceptions
"""
class InvalidEdge(GraphError, KeyError):
"""
exception raised when a wrong edge id is provided
"""
class InvalidVertex(GraphError, KeyError):
"""
exception raised when a wrong vertex id is provided
"""
class Graph(object):
"""Directed graph with multiple links
in this implementation :
- vertices are tuple of edge_in,edge_out
- edges are tuple of source,target
"""
def __init__(self, graph=None, idgenerator="set"):
"""constructor
if graph is not none make a copy of the topological structure of graph
(i.e. don't use the same id)
args:
- graph (Graph): the graph to copy, default=None
- idgenerator (str): type of idgenerator to use, default 'set'
"""
self._vertices = IdDict(idgenerator=idgenerator)
self._edges = IdDict(idgenerator=idgenerator)
if graph is not None:
self.extend(graph)
# ##########################################################
#
# Graph concept
#
# ##########################################################
def source(self, eid):
"""Retrieve the source vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][0]
except KeyError:
raise InvalidEdge(eid)
def target(self, eid):
"""Retrieve the target vertex of an edge
args:
- eid (int): edge id
return:
- (int): vertex id
"""
try:
return self._edges[eid][1]
except KeyError:
raise InvalidEdge(eid)
def edge_vertices(self, eid):
"""Retrieve both source and target vertex of an edge
args:
- eid (int): edge id
return:
- (int, int): source id, target id
"""
try:
return self._edges[eid]
except KeyError:
raise InvalidEdge(eid)
def edge(self, source, target):
"""Find the matching edge with same source and same target
return None if it don't succeed
args:
- source (int): source vertex
- target (int): target vertex
return:
- (int): edge id with same source and target
- (None): if search is unsuccessful
"""
if target not in self:
raise InvalidVertex(target)
for eid in self.out_edges(source):
if self.target(eid) == target:
return eid
return None
def __contains__(self, vid):
"""magic alias for `has_vertex`
"""
return self.has_vertex(vid)
def has_vertex(self, vid):
"""test whether a vertex belong to the graph
args:
- vid (int): id of vertex
return:
- (bool)
"""
return vid in self._vertices
def has_edge(self, eid):
"""test whether an edge belong to the graph
args:
- eid (int): id of edge
return:
- (bool)
"""
return eid in self._edges
def is_valid(self):
"""Test the validity of the graph
return:
- (bool)
"""
return True
# ##########################################################
#
# Vertex List Graph Concept
#
# ##########################################################
def vertices(self):
"""Iterator on all vertices
return:
- (iter of int)
"""
return iter(self._vertices)
def __iter__(self):
"""Magic alias for `vertices`
"""
return iter(self._vertices)
def nb_vertices(self):
"""Total number of vertices in the graph
return:
- (int)
"""
return len(self._vertices)
def __len__(self):
"""Magic alias for `nb_vertices`
"""
return self.nb_vertices()
def in_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.source(eid) for eid in self._vertices[vid][0]]
return iter(set(neighbors_list))
def out_neighbors(self, vid):
"""Iterator on the neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
if vid not in self:
raise InvalidVertex(vid)
neighbors_list = [self.target(eid) for eid in self._vertices[vid][1]]
return iter(set(neighbors_list))
def neighbors(self, vid):
"""Iterator on all neighbors of vid both in and out
args:
- vid (int): vertex id
return:
- (iter of int): iter of vertex id
"""
neighbors_list = list(self.in_neighbors(vid))
neighbors_list.extend(self.out_neighbors(vid))
return iter(set(neighbors_list))
def nb_in_neighbors(self, vid):
"""Number of in neighbors of vid
where edges are directed from neighbor to vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.in_neighbors(vid))
return len(neighbors_set)
def nb_out_neighbors(self, vid):
"""Number of out neighbors of vid
where edges are directed from vid to neighbor
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.out_neighbors(vid))
return len(neighbors_set)
def nb_neighbors(self, vid):
"""Total number of both in and out neighbors of vid
args:
- vid (int): vertex id
return:
- (int)
"""
neighbors_set = list(self.neighbors(vid))
return len(neighbors_set)
# ##########################################################
#
# Edge List Graph Concept
#
# ##########################################################
def _iter_edges(self, vid):
"""
internal function that perform 'edges' with vid not None
"""
link_in, link_out = self._vertices[vid]
for eid in link_in:
yield eid
for eid in link_out:
yield eid
def edges(self, vid=None):
"""Iterate on all edges connected to a given vertex.
If vid is None (default), iterate on all edges in the graph
args:
- vid (int): vertex holdings edges, default (None)
return:
- (iter of int): iterator on edge ids
"""
if vid is None:
return iter(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return self._iter_edges(vid)
def nb_edges(self, vid=None):
"""Number of edges connected to a given vertex.
If vid is None (default), total number of edges in the graph
args:
- vid (int): vertex holdings edges, default (None)
return:
- (int)
"""
if vid is None:
return len(self._edges)
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0]) + len(self._vertices[vid][1])
def in_edges(self, vid):
"""Iterate on all edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][0]:
yield eid
def out_edges(self, vid):
"""Iterate on all edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (iter of int): iterator on edge ids
"""
if vid not in self:
raise InvalidVertex(vid)
for eid in self._vertices[vid][1]:
yield eid
def nb_in_edges(self, vid):
"""Number of edges pointing to a given vertex.
args:
- vid (int): vertex target of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][0])
def nb_out_edges(self, vid):
"""Number of edges away from a given vertex.
args:
- vid (int): vertex source of edges
return:
- (int)
"""
if vid not in self:
raise InvalidVertex(vid)
return len(self._vertices[vid][1])
# ##########################################################
#
# Mutable Vertex Graph concept
#
# ##########################################################
def add_vertex(self, vid=None):
"""Add a vertex to the graph.
If vid is not provided create a new vid
args:
- vid (int): id to use. If None (default) will generate a new one
return:
- vid (int): id used for the new vertex
"""
try:
return self._vertices.add((set(), set()), vid)
except KeyError:
raise InvalidVertex(vid)
def remove_vertex(self, vid):
"""Remove a specified vertex of the graph.
Also remove all edge attached to it.
args:
- vid (int): id of vertex to remove
"""
if vid not in self:
raise InvalidVertex(vid)
link_in, link_out = self._vertices[vid]
for edge in list(link_in):
self.remove_edge(edge)
for edge in list(link_out):
self.remove_edge(edge)
del self._vertices[vid]
def clear(self):
"""Remove all vertices and edges
don't change references to objects
"""
self._edges.clear()
self._vertices.clear()
# ##########################################################
#
# Mutable Edge Graph concept
#
# ##########################################################
def add_edge(self, sid, tid, eid=None):
"""Add an edge to the graph.
If eid is not provided generate a new one.
args:
- sid (int): id of source vertex
- tid (int): id of target vertex
- eid (int): id to use. If None (default) will generate a new one
return:
- eid (int): id used for new edge
"""
if sid not in self:
raise InvalidVertex(sid)
if tid not in self:
raise InvalidVertex(tid)
try:
eid = self._edges.add((sid, tid), eid)
except KeyError:
raise InvalidEdge(eid)
self._vertices[sid][1].add(eid)
self._vertices[tid][0].add(eid)
return eid
def remove_edge(self, eid):
"""Remove a specified edge from the graph.
args:
- eid (int): id of edge to remove
"""
if not self.has_edge(eid):
raise InvalidEdge(eid)
sid, tid = self._edges[eid]
self._vertices[sid][1].remove(eid)
self._vertices[tid][0].remove(eid)
del self._edges[eid]
def clear_edges(self):
"""Remove all the edges of the graph
don't change references to objects
"""
self._edges.clear()
for vid, (in_set, out_set) in self._vertices.iteritems():
in_set.clear()
out_set.clear()
# ##########################################################
#
# Extend Graph concept
#
# ##########################################################
def extend(self, graph):
"""Add the specified graph to self, create new vid and eid
args:
- graph (Graph): the graph to add
return:
- (dict of (int, int)): mapping between vertex id in graph and
vertex id in extended self
- (dict of (int, int)): mapping between edge id in graph and
edge id in extended self
"""
# vertex adding
trans_vid = {}
for vid in list(graph.vertices()):
trans_vid[vid] = self.add_vertex()
# edge adding
trans_eid = {}
for eid in list(graph.edges()):
sid = trans_vid[graph.source(eid)]
tid = trans_vid[graph.target(eid)]
trans_eid[eid] = self.add_edge(sid, tid)
return trans_vid, trans_eid
def sub_graph(self, vids):
"""
"""
raise NotImplemented
# from copy import deepcopy
# vids = set(vids)
#
# result = deepcopy(self)
# result._vertices.clear()
# result._edges.clear()
#
# for key, edges in self._vertices.items():
# if key in vids:
# inedges, outedges = edges
# sortedinedges = set(
# [eid for eid in inedges if self.source(eid) in vids])
# sortedoutedges = set(
# [eid for eid in outedges if self.target(eid) in vids])
# result._vertices.add((sortedinedges, sortedoutedges), key)
# for eid in sortedoutedges:
# result._edges.add(self._edges[eid], eid)
#
# return result
| [((1469, 1500), 'id_dict.IdDict', 'IdDict', ([], {'idgenerator': 'idgenerator'}), '(idgenerator=idgenerator)\n', (1475, 1500), False, 'from id_dict import IdDict\n'), ((1523, 1554), 'id_dict.IdDict', 'IdDict', ([], {'idgenerator': 'idgenerator'}), '(idgenerator=idgenerator)\n', (1529, 1554), False, 'from id_dict import IdDict\n')] |
GT-AcerZhang/PaddlePaddle-SSD | nets/mobilenet_v2_ssd.py | 3833afe3470b7dc811409b3d8111b98dc31c6d0e | import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetV2SSD:
def __init__(self, img, num_classes, img_shape):
self.img = img
self.num_classes = num_classes
self.img_shape = img_shape
def ssd_net(self, scale=1.0):
# 300x300
bottleneck_params_list = [(1, 16, 1, 1),
(6, 24, 2, 2),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1)]
# conv1
input = self.conv_bn_layer(input=self.img,
num_filters=int(32 * scale),
filter_size=3,
stride=2,
padding=1,
if_act=True)
# bottleneck sequences
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
in_c = int(c * scale)
# 19x19
module11 = input
tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2)
# 10x10
module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1)
module14 = self.extra_block(module13, 256, 512, 1)
# 5x5
module15 = self.extra_block(module14, 128, 256, 1)
# 3x3
module16 = self.extra_block(module15, 128, 256, 1)
# 2x2
module17 = self.extra_block(module16, 64, 128, 1)
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=[module11, module13, module14, module15, module16, module17],
image=self.img,
num_classes=self.num_classes,
min_ratio=20,
max_ratio=90,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]],
base_size=self.img_shape[2],
offset=0.5,
flip=True)
return mbox_locs, mbox_confs, box, box_var
def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True,
use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn = fluid.layers.batch_norm(input=conv)
if if_act:
return fluid.layers.relu6(bn)
else:
return bn
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
def inverted_residual_unit(self,
input,
num_in_filter,
num_filters,
ifshortcut,
stride,
filter_size,
padding,
expansion_factor):
num_expfilter = int(round(num_in_filter * expansion_factor))
channel_expand = self.conv_bn_layer(input=input,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
bottleneck_conv = self.conv_bn_layer(input=channel_expand,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
if_act=True,
use_cudnn=False)
linear_out = self.conv_bn_layer(input=bottleneck_conv,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=False)
if ifshortcut:
out = self.shortcut(input=input, data_residual=linear_out)
return out
else:
return linear_out
def invresi_blocks(self, input, in_c, t, c, n, s):
first_block = self.inverted_residual_unit(input=input,
num_in_filter=in_c,
num_filters=c,
ifshortcut=False,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t)
last_residual_block = first_block
last_c = c
for i in range(1, n):
last_residual_block = self.inverted_residual_unit(input=last_residual_block,
num_in_filter=last_c,
num_filters=c,
ifshortcut=True,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t)
return last_residual_block
def conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True):
parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
conv = fluid.layers.conv2d(input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def extra_block(self, input, num_filters1, num_filters2, num_groups):
# 1x1 conv
pointwise_conv = self.conv_bn(input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0)
# 3x3 conv
normal_conv = self.conv_bn(input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1)
return normal_conv
def build_ssd(img, num_classes, img_shape):
ssd_model = MobileNetV2SSD(img, num_classes, img_shape)
return ssd_model.ssd_net()
if __name__ == '__main__':
data = fluid.data(name='data', shape=[None, 3, 300, 300])
build_ssd(data, 21, img_shape=[3, 300, 300])
| [((8339, 8389), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[None, 3, 300, 300]'}), "(name='data', shape=[None, 3, 300, 300])\n", (8349, 8389), True, 'import paddle.fluid as fluid\n'), ((1779, 2209), 'paddle.fluid.layers.multi_box_head', 'fluid.layers.multi_box_head', ([], {'inputs': '[module11, module13, module14, module15, module16, module17]', 'image': 'self.img', 'num_classes': 'self.num_classes', 'min_ratio': '(20)', 'max_ratio': '(90)', 'min_sizes': '[60.0, 105.0, 150.0, 195.0, 240.0, 285.0]', 'max_sizes': '[[], 150.0, 195.0, 240.0, 285.0, 300.0]', 'aspect_ratios': '[[2.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0]]', 'base_size': 'self.img_shape[2]', 'offset': '(0.5)', 'flip': '(True)'}), '(inputs=[module11, module13, module14, module15,\n module16, module17], image=self.img, num_classes=self.num_classes,\n min_ratio=20, max_ratio=90, min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0,\n 285.0], max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],\n aspect_ratios=[[2.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [2.0, 3.0], [\n 2.0, 3.0]], base_size=self.img_shape[2], offset=0.5, flip=True)\n', (1806, 2209), True, 'import paddle.fluid as fluid\n'), ((2597, 2804), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'input', 'num_filters': 'num_filters', 'filter_size': 'filter_size', 'stride': 'stride', 'padding': 'padding', 'groups': 'num_groups', 'use_cudnn': 'use_cudnn', 'param_attr': 'parameter_attr', 'bias_attr': '(False)'}), '(input=input, num_filters=num_filters, filter_size=\n filter_size, stride=stride, padding=padding, groups=num_groups,\n use_cudnn=use_cudnn, param_attr=parameter_attr, bias_attr=False)\n', (2616, 2804), True, 'import paddle.fluid as fluid\n'), ((3089, 3124), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'conv'}), '(input=conv)\n', (3112, 3124), True, 'import paddle.fluid as fluid\n'), ((3284, 3334), 'paddle.fluid.layers.elementwise_add', 
'fluid.layers.elementwise_add', (['input', 'data_residual'], {}), '(input, data_residual)\n', (3312, 3334), True, 'import paddle.fluid as fluid\n'), ((6819, 7026), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', ([], {'input': 'input', 'num_filters': 'num_filters', 'filter_size': 'filter_size', 'stride': 'stride', 'padding': 'padding', 'groups': 'num_groups', 'use_cudnn': 'use_cudnn', 'param_attr': 'parameter_attr', 'bias_attr': '(False)'}), '(input=input, num_filters=num_filters, filter_size=\n filter_size, stride=stride, padding=padding, groups=num_groups,\n use_cudnn=use_cudnn, param_attr=parameter_attr, bias_attr=False)\n', (6838, 7026), True, 'import paddle.fluid as fluid\n'), ((7313, 7357), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', ([], {'input': 'conv', 'act': 'act'}), '(input=conv, act=act)\n', (7336, 7357), True, 'import paddle.fluid as fluid\n'), ((3163, 3185), 'paddle.fluid.layers.relu6', 'fluid.layers.relu6', (['bn'], {}), '(bn)\n', (3181, 3185), True, 'import paddle.fluid as fluid\n'), ((2574, 2580), 'paddle.fluid.initializer.MSRA', 'MSRA', ([], {}), '()\n', (2578, 2580), False, 'from paddle.fluid.initializer import MSRA\n'), ((6796, 6802), 'paddle.fluid.initializer.MSRA', 'MSRA', ([], {}), '()\n', (6800, 6802), False, 'from paddle.fluid.initializer import MSRA\n')] |
caishenghang/oneflow | oneflow/python/test/ops/test_object_bbox_scale.py | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _random_sample_images(anno_file, image_dir, batch_size):
from pycocotools.coco import COCO
image_files = []
image_ids = []
batch_group_id = -1
coco = COCO(anno_file)
img_ids = coco.getImgIds()
while len(image_files) < batch_size:
rand_img_id = random.choice(img_ids)
img_h = coco.imgs[rand_img_id]["height"]
img_w = coco.imgs[rand_img_id]["width"]
group_id = int(img_h / img_w)
if batch_group_id == -1:
batch_group_id = group_id
if group_id != batch_group_id:
continue
anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
if len(anno_ids) == 0:
continue
image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"]))
image_ids.append(rand_img_id)
assert len(image_files) == len(image_ids)
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
bbox_list = _get_images_bbox_list(coco, image_ids)
return images, bbox_list
def _get_images_bbox_list(coco, image_ids):
bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
anno_ids = list(
filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids)
)
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
return bbox_list
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
image_shape = _get_images_static_shape(images)
bbox_shape = _get_bbox_static_shape(bbox_list)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def target_resize_bbox_scale_job(
image_def: oft.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
bbox_def: oft.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
):
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
)
return scaled_bbox_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
output_bbox_list, output_image_size = target_resize_bbox_scale_job(
[input_image_list], [input_bbox_list]
).get()
return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0]
def _compare_bbox_scale(
test_case,
anno_file,
image_dir,
batch_size,
target_size,
max_size,
print_debug_info=False,
):
images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size)
of_bbox_list, image_size_list = _of_target_resize_bbox_scale(
images, bbox_list, target_size, max_size
)
for image, bbox, of_bbox, image_size in zip(
images, bbox_list, of_bbox_list, image_size_list
):
w, h = image_size
oh, ow = image.shape[0:2]
scale_h = h / oh
scale_w = w / ow
bbox[:, 0] *= scale_w
bbox[:, 1] *= scale_h
bbox[:, 2] *= scale_w
bbox[:, 3] *= scale_h
test_case.assertTrue(np.allclose(bbox, of_bbox))
@flow.unittest.skip_unless_1n1d()
class TestObjectBboxScale(flow.unittest.TestCase):
def test_object_bbox_scale(test_case):
_compare_bbox_scale(
test_case,
"/dataset/mscoco_2017/annotations/instances_val2017.json",
"/dataset/mscoco_2017/val2017",
4,
800,
1333,
)
if __name__ == "__main__":
unittest.main()
| [((5441, 5473), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5471, 5473), True, 'import oneflow as flow\n'), ((891, 906), 'pycocotools.coco.COCO', 'COCO', (['anno_file'], {}), '(anno_file)\n', (895, 906), False, 'from pycocotools.coco import COCO\n'), ((2307, 2336), 'numpy.amax', 'np.amax', (['image_shapes'], {'axis': '(0)'}), '(image_shapes, axis=0)\n', (2314, 2336), True, 'import numpy as np\n'), ((2767, 2795), 'numpy.amax', 'np.amax', (['bbox_shapes'], {'axis': '(0)'}), '(bbox_shapes, axis=0)\n', (2774, 2795), True, 'import numpy as np\n'), ((3281, 3309), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3307, 3309), True, 'import oneflow as flow\n'), ((3328, 3349), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3347, 3349), True, 'import oneflow as flow\n'), ((3467, 3516), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3487, 3516), True, 'import oneflow as flow\n'), ((5828, 5843), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5841, 5843), False, 'import unittest\n'), ((1001, 1023), 'random.choice', 'random.choice', (['img_ids'], {}), '(img_ids)\n', (1014, 1023), False, 'import random\n'), ((2028, 2107), 'numpy.array', 'np.array', (["[coco.anns[anno_id]['bbox'] for anno_id in anno_ids]"], {'dtype': 'np.single'}), "([coco.anns[anno_id]['bbox'] for anno_id in anno_ids], dtype=np.single)\n", (2036, 2107), True, 'import numpy as np\n'), ((3433, 3459), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (3457, 3459), True, 'import oneflow as flow\n'), ((3816, 3860), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['image_def'], {}), '(image_def)\n', (3849, 3860), True, 'import oneflow as flow\n'), ((3910, 3998), 'oneflow.image_target_resize', 'flow.image_target_resize', (['images_buffer'], {'target_size': 'target_size', 
'max_size': 'max_size'}), '(images_buffer, target_size=target_size, max_size=\n max_size)\n', (3934, 3998), True, 'import oneflow as flow\n'), ((4038, 4081), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['bbox_def'], {}), '(bbox_def)\n', (4071, 4081), True, 'import oneflow as flow\n'), ((4104, 4146), 'oneflow.object_bbox_scale', 'flow.object_bbox_scale', (['bbox_buffer', 'scale'], {}), '(bbox_buffer, scale)\n', (4126, 4146), True, 'import oneflow as flow\n'), ((4174, 4265), 'oneflow.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['scaled_bbox'], {'shape': 'bbox_shape[1:]', 'dtype': 'flow.float'}), '(scaled_bbox, shape=bbox_shape[1:], dtype=\n flow.float)\n', (4207, 4265), True, 'import oneflow as flow\n'), ((4350, 4379), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (4364, 4379), True, 'import numpy as np\n'), ((4424, 4452), 'numpy.expand_dims', 'np.expand_dims', (['bbox'], {'axis': '(0)'}), '(bbox, axis=0)\n', (4438, 4452), True, 'import numpy as np\n'), ((1429, 1489), 'os.path.join', 'os.path.join', (['image_dir', "coco.imgs[rand_img_id]['file_name']"], {}), "(image_dir, coco.imgs[rand_img_id]['file_name'])\n", (1441, 1489), False, 'import os\n'), ((5410, 5436), 'numpy.allclose', 'np.allclose', (['bbox', 'of_bbox'], {}), '(bbox, of_bbox)\n', (5421, 5436), True, 'import numpy as np\n'), ((1590, 1612), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (1600, 1612), False, 'import cv2\n')] |
BertRaeymaekers/scrapbook | vagrant/kafka/bin/init.py | 3c8483d4594356fbc84deb8d6496db3d856492c1 | #! /usr/bin/env python3
import json
import os.path
import jinja2
DEFAULT_PARAMS = {
"ansible_user": "vagrant"
}
if __name__ == "__main__":
# Reading configuration
here = os.path.dirname(os.path.realpath(__file__ + "/../"))
with open(here + "/config.json", "r") as rf:
config = json.load(rf)
print(json.dumps(config, sort_keys=True, indent=4))
# Generating an inventory file
with open(here + "/playbook/inventory/hosts", "w") as inventory:
inventory.write("[kafka]\n")
for host in config["hosts"]:
# Setting default values and updating them when more specific.
params = dict()
params.update(DEFAULT_PARAMS)
params.update(config["params"])
params.update(config["hosts"][host])
# Setting some extra ansible paramters.
params["ansible_ssh_host"] = params["ip"]
inventory.write("%s\t%s\n" % (host, " ".join(("%s=%s" % (k,v) for k,v in params.items()))))
# Generating the Vagrantfile
env = jinja2.Environment(loader=jinja2.FileSystemLoader(here + "/templates/"))
template = env.get_template('Vagrantfile.j2')
template.stream(**config).dump(here + '/vagrant/Vagrantfile')
# Generating group vars for kafka
with open(here + "/playbook/group_vars/kafka.yml", "w") as gv:
gv.write("---\n")
gv.write("hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" %s: '%s.%s'\n" % (params["ip"], params["hostname"], config["params"]["domain" ]))
gv.write("kafka:\n")
gv.write(" hosts:\n")
for (host, params) in config["hosts"].items():
gv.write(" - %s.%s\n" % (params["hostname"], config["params"]["domain" ]))
| [((308, 321), 'json.load', 'json.load', (['rf'], {}), '(rf)\n', (317, 321), False, 'import json\n'), ((332, 376), 'json.dumps', 'json.dumps', (['config'], {'sort_keys': '(True)', 'indent': '(4)'}), '(config, sort_keys=True, indent=4)\n', (342, 376), False, 'import json\n'), ((1075, 1120), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (["(here + '/templates/')"], {}), "(here + '/templates/')\n", (1098, 1120), False, 'import jinja2\n')] |
lmaurits/harvest | harvest/models/beastsimulator.py | df6b549096da8ae2f4ed38aa2be19c7e82fa60e3 | import os
import harvest.dataframe
from harvest.models.simulator import Simulator
class BeastSimulator(Simulator):
    """Simulator that drives an external BEAST run to produce its data."""

    def __init__(self, tree, n_features):
        Simulator.__init__(self, tree, n_features)

    def generate_beast_xml(self):
        # Subclasses should implement this
        return None

    def generate_data(self):
        """Run BEAST on the subclass-provided XML and load the simulated data."""
        simulation_xml = self.generate_beast_xml()
        xml_path = simulation_xml.write_file(overwrite=True)
        # Run the BEAST simulation, discarding its console output.
        os.system("beast %s > /dev/null" % xml_path)
        # The driver XML is no longer needed once BEAST has finished.
        os.remove(xml_path)
        # Load the freshly simulated data, then clean up BEAST's output file.
        frame = harvest.dataframe.read_from_beast_xml(simulation_xml)
        os.remove(simulation_xml.output_filename)
        self.data = frame
        self.data.datatype = self.datatype
| [((168, 210), 'harvest.models.simulator.Simulator.__init__', 'Simulator.__init__', (['self', 'tree', 'n_features'], {}), '(self, tree, n_features)\n', (186, 210), False, 'from harvest.models.simulator import Simulator\n'), ((524, 573), 'os.system', 'os.system', (["('beast %s > /dev/null' % temp_filename)"], {}), "('beast %s > /dev/null' % temp_filename)\n", (533, 573), False, 'import os\n'), ((615, 639), 'os.remove', 'os.remove', (['temp_filename'], {}), '(temp_filename)\n', (624, 639), False, 'import os\n'), ((784, 814), 'os.remove', 'os.remove', (['xml.output_filename'], {}), '(xml.output_filename)\n', (793, 814), False, 'import os\n')] |
DutChen18/slime-clusters-cuda | assimilator.py | 186d198665a017cf0eacde33765b6cb3cb4aecb5 | # pylint: skip-file
import os
from assimilator import *
from Boinc import boinc_project_path
class SlimeClustersAssimilator(Assimilator):
    """BOINC assimilator that appends canonical slime-cluster results to a shared file."""

    def __init__(self):
        Assimilator.__init__(self)

    def assimilate_handler(self, wu, results, canonical_result):
        """Append the canonical result's lines to <project>/slime-clusters/results.txt.

        wu -- the workunit being assimilated (unused here)
        results -- all results for the workunit (unused here)
        canonical_result -- the validated result; nothing happens when absent
        """
        # `is None` (identity) instead of `== None`: the idiomatic check,
        # and immune to custom __eq__ implementations.
        if canonical_result is None:
            return
        src_file = self.get_file_path(canonical_result)
        dst_dir = boinc_project_path.project_path('slime-clusters')
        dst_file = os.path.join(dst_dir, 'results.txt')

        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(dst_dir, exist_ok=True)

        with open(src_file, 'r') as src, open(dst_file, 'a') as dst:
            dst.writelines(src.readlines())
if __name__ == "__main__":
    # Entry point when invoked by the BOINC daemon framework.
    SlimeClustersAssimilator().run()
Rubtsowa/modin | modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py | 6550939753c76e896ef2bfd65bb9468d6ad161d7 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
class cuDFOnRayDataframe(PandasOnRayDataframe):
    """
    The class implements the interface in ``PandasOnRayDataframe`` using cuDF.

    Parameters
    ----------
    partitions : np.ndarray
        A 2D NumPy array of partitions.
    index : sequence
        The index for the dataframe. Converted to a ``pandas.Index``.
    columns : sequence
        The columns object for the dataframe. Converted to a ``pandas.Index``.
    row_lengths : list, optional
        The length of each partition in the rows. The "height" of
        each of the block partitions. Is computed if not provided.
    column_widths : list, optional
        The width of each partition in the columns. The "width" of
        each of the block partitions. Is computed if not provided.
    dtypes : pandas.Series, optional
        The data types for the dataframe columns.
    """

    _partition_mgr_cls = cuDFOnRayDataframePartitionManager

    def synchronize_labels(self, axis=None):
        """
        Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly.

        Parameters
        ----------
        axis : {0, 1, None}, default: None
            The axis to apply to. If None, it applies to both axes.
        """
        ErrorMessage.catch_bugs_and_request_email(
            axis is not None and axis not in [0, 1]
        )
        cum_row_lengths = np.cumsum([0] + self._row_lengths)
        cum_col_widths = np.cumsum([0] + self._column_widths)

        def apply_idx_objs(df, idx, cols, axis):
            # cudf does not support set_axis. It only supports rename with 1-to-1 mapping.
            # Therefore, we need to create the dictionary that have the relationship between
            # current index and new ones.
            idx = {df.index[i]: idx[i] for i in range(len(idx))}
            # Fix: the column mapping must be keyed by the *current column*
            # labels for ``rename(columns=...)`` to match anything; the
            # original keyed it by ``df.index``.
            cols = {df.columns[i]: cols[i] for i in range(len(cols))}
            if axis == 0:
                return df.rename(index=idx)
            elif axis == 1:
                return df.rename(columns=cols)
            else:
                return df.rename(index=idx, columns=cols)

        keys = np.array(
            [
                [
                    self._partitions[i][j].apply(
                        apply_idx_objs,
                        idx=self.index[
                            slice(cum_row_lengths[i], cum_row_lengths[i + 1])
                        ],
                        cols=self.columns[
                            slice(cum_col_widths[j], cum_col_widths[j + 1])
                        ],
                        axis=axis,
                    )
                    for j in range(len(self._partitions[i]))
                ]
                for i in range(len(self._partitions))
            ]
        )

        # Rewrap the new partition keys in fresh partition objects, reusing
        # each partition's GPU manager and cached length/width.
        self._partitions = np.array(
            [
                [
                    cuDFOnRayDataframePartition(
                        self._partitions[i][j].get_gpu_manager(),
                        keys[i][j],
                        self._partitions[i][j]._length_cache,
                        self._partitions[i][j]._width_cache,
                    )
                    for j in range(len(keys[i]))
                ]
                for i in range(len(keys))
            ]
        )

    def mask(
        self,
        row_indices=None,
        row_numeric_idx=None,
        col_indices=None,
        col_numeric_idx=None,
    ):
        """
        Lazily select columns or rows from given indices.

        Parameters
        ----------
        row_indices : list of hashable, optional
            The row labels to extract.
        row_numeric_idx : list of int, optional
            The row indices to extract.
        col_indices : list of hashable, optional
            The column labels to extract.
        col_numeric_idx : list of int, optional
            The column indices to extract.

        Returns
        -------
        cuDFOnRayDataframe
             A new ``cuDFOnRayDataframe`` from the mask provided.

        Notes
        -----
        If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used.
        The same rule applied to `col_indices` and `col_numeric_idx`.
        """
        # A full slice selects everything; treat it as "no selection".
        if isinstance(row_numeric_idx, slice) and (
            row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None)
        ):
            row_numeric_idx = None
        if isinstance(col_numeric_idx, slice) and (
            col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None)
        ):
            col_numeric_idx = None
        if (
            row_indices is None
            and row_numeric_idx is None
            and col_indices is None
            and col_numeric_idx is None
        ):
            return self.copy()

        # Labels take precedence over numeric positions (see Notes).
        if row_indices is not None:
            row_numeric_idx = self.index.get_indexer_for(row_indices)
        if row_numeric_idx is not None:
            row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx)
            if isinstance(row_numeric_idx, slice):
                # Row lengths for slice are calculated as the length of the slice
                # on the partition. Often this will be the same length as the current
                # length, but sometimes it is different, thus the extra calculation.
                new_row_lengths = [
                    len(range(*idx.indices(self._row_lengths[p])))
                    for p, idx in row_partitions_list.items()
                ]
                # Use the slice to calculate the new row index
                new_index = self.index[row_numeric_idx]
            else:
                new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()]
                new_index = self.index[sorted(row_numeric_idx)]
        else:
            row_partitions_list = {
                i: slice(None) for i in range(len(self._row_lengths))
            }
            new_row_lengths = self._row_lengths
            new_index = self.index

        if col_indices is not None:
            col_numeric_idx = self.columns.get_indexer_for(col_indices)
        if col_numeric_idx is not None:
            col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx)
            if isinstance(col_numeric_idx, slice):
                # Column widths for slice are calculated as the length of the slice
                # on the partition. Often this will be the same length as the current
                # length, but sometimes it is different, thus the extra calculation.
                new_col_widths = [
                    len(range(*idx.indices(self._column_widths[p])))
                    for p, idx in col_partitions_list.items()
                ]
                # Use the slice to calculate the new columns
                new_columns = self.columns[col_numeric_idx]
                assert sum(new_col_widths) == len(
                    new_columns
                ), "{} != {}.\n{}\n{}\n{}".format(
                    sum(new_col_widths),
                    len(new_columns),
                    col_numeric_idx,
                    self._column_widths,
                    col_partitions_list,
                )
                if self._dtypes is not None:
                    new_dtypes = self.dtypes[col_numeric_idx]
                else:
                    new_dtypes = None
            else:
                new_col_widths = [len(idx) for _, idx in col_partitions_list.items()]
                new_columns = self.columns[sorted(col_numeric_idx)]
                if self._dtypes is not None:
                    new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)]
                else:
                    new_dtypes = None
        else:
            col_partitions_list = {
                i: slice(None) for i in range(len(self._column_widths))
            }
            new_col_widths = self._column_widths
            new_columns = self.columns
            if self._dtypes is not None:
                new_dtypes = self.dtypes
            else:
                new_dtypes = None

        # Each entry pairs the masked partition key with its GPU manager so
        # the new partitions can be rebuilt on the same devices.
        key_and_gpus = np.array(
            [
                [
                    [
                        self._partitions[row_idx][col_idx].mask(
                            row_internal_indices, col_internal_indices
                        ),
                        self._partitions[row_idx][col_idx].get_gpu_manager(),
                    ]
                    for col_idx, col_internal_indices in col_partitions_list.items()
                    if isinstance(col_internal_indices, slice)
                    or len(col_internal_indices) > 0
                ]
                for row_idx, row_internal_indices in row_partitions_list.items()
                if isinstance(row_internal_indices, slice)
                or len(row_internal_indices) > 0
            ]
        )

        shape = key_and_gpus.shape[:2]
        keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist())
        gpu_managers = key_and_gpus[:, :, 1].flatten().tolist()
        new_partitions = self._partition_mgr_cls._create_partitions(
            keys, gpu_managers
        ).reshape(shape)
        intermediate = self.__constructor__(
            new_partitions,
            new_index,
            new_columns,
            new_row_lengths,
            new_col_widths,
            new_dtypes,
        )

        # Check if monotonically increasing, return if it is. Fast track code path for
        # common case to keep it fast.
        if (
            row_numeric_idx is None
            or isinstance(row_numeric_idx, slice)
            or len(row_numeric_idx) == 1
            or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1])
        ) and (
            col_numeric_idx is None
            or isinstance(col_numeric_idx, slice)
            or len(col_numeric_idx) == 1
            or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1])
        ):
            return intermediate

        # The new labels are often smaller than the old labels, so we can't reuse the
        # original order values because those were mapped to the original data. We have
        # to reorder here based on the expected order from within the data.
        # We create a dictionary mapping the position of the numeric index with respect
        # to all others, then recreate that order by mapping the new order values from
        # the old. This information is sent to `_reorder_labels`.
        if row_numeric_idx is not None:
            row_order_mapping = dict(
                zip(sorted(row_numeric_idx), range(len(row_numeric_idx)))
            )
            new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx]
        else:
            new_row_order = None
        if col_numeric_idx is not None:
            col_order_mapping = dict(
                zip(sorted(col_numeric_idx), range(len(col_numeric_idx)))
            )
            new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx]
        else:
            new_col_order = None
        return intermediate._reorder_labels(
            row_numeric_idx=new_row_order, col_numeric_idx=new_col_order
        )
| [((2458, 2544), 'modin.error_message.ErrorMessage.catch_bugs_and_request_email', 'ErrorMessage.catch_bugs_and_request_email', (['(axis is not None and axis not in [0, 1])'], {}), '(axis is not None and axis not in\n [0, 1])\n', (2499, 2544), False, 'from modin.error_message import ErrorMessage\n'), ((2590, 2624), 'numpy.cumsum', 'np.cumsum', (['([0] + self._row_lengths)'], {}), '([0] + self._row_lengths)\n', (2599, 2624), True, 'import numpy as np\n'), ((2650, 2686), 'numpy.cumsum', 'np.cumsum', (['([0] + self._column_widths)'], {}), '([0] + self._column_widths)\n', (2659, 2686), True, 'import numpy as np\n'), ((10833, 10884), 'numpy.all', 'np.all', (['(row_numeric_idx[1:] >= row_numeric_idx[:-1])'], {}), '(row_numeric_idx[1:] >= row_numeric_idx[:-1])\n', (10839, 10884), True, 'import numpy as np\n'), ((11043, 11094), 'numpy.all', 'np.all', (['(col_numeric_idx[1:] >= col_numeric_idx[:-1])'], {}), '(col_numeric_idx[1:] >= col_numeric_idx[:-1])\n', (11049, 11094), True, 'import numpy as np\n')] |
mw5868/University | Exoplanet_Population.py | 076c9b001dbfe3765607877be4f89ccf86a88331 | from astropy.table import Table, Column
import matplotlib.pyplot as plt
# NASA Exoplanet Archive API; returns the full "exoplanets" table as CSV.
url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets"
t = Table.read(url, format="csv")

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, aspect="equal")

# One scatter layer per planet letter. Planet letters start at "b", so the
# original script labelled letter "b" as "2 Planets", "c" as "3 Planets", etc.
# (labelling preserved here). The loop replaces eight copy-pasted
# filter/scatter stanzas.
colors = ["Black", "red", "blue", "green", "yellow", "purple", "orange", "cyan"]
for count, (letter, color) in enumerate(zip("bcdefghi", colors), start=2):
    subset = t[t["pl_letter"] == letter]
    ax.scatter(subset["ra"], subset["dec"], color=color, label="%d Planets" % count)

ax.set_xlim(360, 0)  # RA axis runs right-to-left, sky-map convention
ax.set_ylim(-90, 90)
ax.set_ylabel("DEC")
ax.set_xlabel("RA")
ax.set_title("Positions of Explanets by number of planets in system")
# A single legend call suffices; the original invoked both ax.legend and
# plt.legend with identical arguments, doing the same work twice.
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
marthoc/pykuna | pykuna/errors.py | f5bf02f26e1931b35becde6e1da58fb8bb0cc2d8 | class KunaError(Exception):
pass
class AuthenticationError(KunaError):
    """Raised when authentication fails."""
class UnauthorizedError(KunaError):
    """Raised when an API call fails as unauthorized (401)."""
| [] |
henrimitte/Project-Euler | src/pe_problem74.py | 77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb | from tools import factorial
def solve():
    """Project Euler 74: count digit-factorial chains of exactly 60 terms."""
    # Precompute 0!..9! once; chain steps only ever need these ten values.
    fact = tuple(factorial(d) for d in range(10))

    def digit_factorial_sum(number: int) -> int:
        total = 0
        while number > 0:
            total += fact[number % 10]
            number //= 10
        return total

    limit = 1000000
    lengths = [0] * limit
    for start in range(limit):
        if lengths[start]:
            continue
        chain = [start]
        current = start
        while True:
            current = digit_factorial_sum(current)
            if current in chain:
                break
            chain.append(current)
        lengths[start] = len(chain)

    # Number of starting values whose chain has exactly 60 non-repeating terms.
    print(lengths.count(60))
if __name__ == '__main__':
    # Run the Project Euler 74 solver when executed as a script.
    solve()
| [((58, 70), 'tools.factorial', 'factorial', (['x'], {}), '(x)\n', (67, 70), False, 'from tools import factorial\n')] |
ferguscan/thingsboard-gateway | thingsboard_gateway/connectors/modbus/modbus_connector.py | bc20fdb8e46f840b8538a010db2714ec6071fa5b | # Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from time import sleep, time
from queue import Queue
from random import choice
from string import ascii_lowercase
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
# Try import Pymodbus library or install it and import
try:
from pymodbus.constants import Defaults
except ImportError:
print("Modbus library not found - installing...")
TBUtility.install_package("pymodbus", ">=2.3.0")
TBUtility.install_package('pyserial')
from pymodbus.constants import Defaults
try:
from twisted.internet import reactor
except ImportError:
TBUtility.install_package('twisted')
from twisted.internet import reactor
from twisted.internet import reactor
from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse
from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse
from pymodbus.register_read_message import ReadRegistersResponseBase
from pymodbus.bit_read_message import ReadBitsResponseBase
from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient
from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer
from pymodbus.exceptions import ConnectionException
from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.version import version
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.datastore import ModbusSparseDataBlock
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.modbus.constants import *
from thingsboard_gateway.connectors.modbus.slave import Slave
from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter
from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter
# Converted-data sections that are diffed and forwarded to ThingsBoard.
CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER]

# Maps the "method" config value to the pymodbus framer class.
FRAMER_TYPE = {
    'rtu': ModbusRtuFramer,
    'socket': ModbusSocketFramer,
    'ascii': ModbusAsciiFramer
}

# Maps the transport type to the asynchronous pymodbus server starter.
SLAVE_TYPE = {
    'tcp': StartTcpServer,
    'udp': StartUdpServer,
    'serial': StartSerialServer
}

# Config section name -> ModbusSlaveContext block key (co/hr/ir/di).
FUNCTION_TYPE = {
    'coils_initializer': 'co',
    'holding_registers': 'hr',
    'input_registers': 'ir',
    'discrete_inputs': 'di'
}

# (single-object write code, multi-object write code) per register type.
FUNCTION_CODE_WRITE = {
    'holding_registers': (6, 16),
    'coils_initializer': (5, 15)
}

# Modbus read function code per register type.
FUNCTION_CODE_READ = {
    'holding_registers': 3,
    'coils_initializer': 1,
    'input_registers': 4,
    'discrete_inputs': 2
}
class ModbusConnector(Connector, Thread):
    # Class-wide queue of Slave objects due for polling; filled by Slave
    # timers via `callback` and drained by the connector's `run` loop.
    process_requests = Queue(-1)
    def __init__(self, gateway, config, connector_type):
        """Build the connector: convert legacy config, optionally start a local
        Modbus slave server on its own thread, and load the master's slaves.

        gateway -- gateway service used for storage and config paths
        config -- raw connector configuration dictionary
        connector_type -- connector type string reported via `connector_type`
        """
        self.statistics = {STATISTIC_MESSAGE_RECEIVED_PARAMETER: 0,
                           STATISTIC_MESSAGE_SENT_PARAMETER: 0}
        super().__init__()
        self.__gateway = gateway
        self._connector_type = connector_type
        # Old-style configs are converted to the current schema in place.
        self.__backward_compatibility_adapter = BackwardCompatibilityAdapter(config, gateway.get_config_path())
        self.__config = self.__backward_compatibility_adapter.convert()
        # Thread name doubles as the connector name; random suffix avoids clashes.
        self.setName(self.__config.get("name", 'Modbus Default ' + ''.join(choice(ascii_lowercase) for _ in range(5))))

        self.__connected = False
        self.__stopped = False
        self.daemon = True

        if self.__config.get('slave'):
            # Run the gateway's own Modbus slave server on a separate thread,
            # since the pymodbus reactor loop blocks.
            self.__slave_thread = Thread(target=self.__configure_and_run_slave, args=(self.__config['slave'],),
                                         daemon=True, name='Gateway as a slave')
            self.__slave_thread.start()

            if config['slave'].get('sendDataToThingsBoard', False):
                # Mirror the local slave's values into the master slave list so
                # its data is also polled and reported.
                self.__modify_main_config()

        self.__slaves = []
        self.__load_slaves()
    def is_connected(self):
        # True once the connector thread has entered its main loop (see `run`).
        return self.__connected
    def open(self):
        # Clear the stop flag, then launch the connector thread (`run`).
        self.__stopped = False
        self.start()
def run(self):
self.__connected = True
while True:
if not self.__stopped and not ModbusConnector.process_requests.empty():
thread = Thread(target=self.__process_slaves, daemon=True)
thread.start()
if self.__stopped:
break
sleep(.2)
@staticmethod
def __configure_and_run_slave(config):
identity = None
if config.get('identity'):
identity = ModbusDeviceIdentification()
identity.VendorName = config['identity'].get('vendorName', '')
identity.ProductCode = config['identity'].get('productCode', '')
identity.VendorUrl = config['identity'].get('vendorUrl', '')
identity.ProductName = config['identity'].get('productName', '')
identity.ModelName = config['identity'].get('ModelName', '')
identity.MajorMinorRevision = version.short()
blocks = {}
for (key, value) in config.get('values').items():
values = {}
converter = BytesModbusDownlinkConverter({})
for item in value:
for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
for val in item.get(section, []):
function_code = FUNCTION_CODE_WRITE[key][0] if val['objectsCount'] <= 1 else \
FUNCTION_CODE_WRITE[key][1]
converted_value = converter.convert(
{**val,
'device': config.get('deviceName', 'Gateway'), 'functionCode': function_code,
'byteOrder': config['byteOrder'], 'wordOrder': config['wordOrder']},
{'data': {'params': val['value']}})
values[val['address'] + 1] = converted_value
blocks[FUNCTION_TYPE[key]] = ModbusSparseDataBlock(values)
context = ModbusServerContext(slaves=ModbusSlaveContext(**blocks), single=True)
SLAVE_TYPE[config['type']](context, identity=identity,
address=(config.get('host'), config.get('port')) if (
config['type'] == 'tcp' or 'udp') else None,
port=config.get('port') if config['type'] == 'serial' else None,
framer=FRAMER_TYPE[config['method']])
    def __modify_main_config(self):
        """Append the local slave's value sections to the master slave list.

        Used when the gateway's own slave should also report to ThingsBoard:
        read sections get the matching read function code, while
        attributeUpdates/rpc keep the function code they already declare.
        """
        config = self.__config['slave']
        values = config.pop('values')
        device = config
        for (register, reg_values) in values.items():
            for value in reg_values:
                for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'):
                    if not device.get(section):
                        device[section] = []
                    for item in value.get(section, []):
                        device[section].append({**item, 'functionCode': FUNCTION_CODE_READ[
                            register] if section not in ('attributeUpdates', 'rpc') else item['functionCode']})

        self.__config['master']['slaves'].append(device)
def __load_slaves(self):
self.__slaves = [
Slave(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback': ModbusConnector.callback}) for
device in self.__config.get('master', {'slaves': []}).get('slaves', [])]
    @classmethod
    def callback(cls, slave):
        """Enqueue a slave that is due for polling; `run` drains the queue."""
        cls.process_requests.put(slave)
    @property
    def connector_type(self):
        # Connector type string supplied by the gateway at construction time.
        return self._connector_type
def __convert_and_save_data(self, config_tuple):
device, current_device_config, config, device_responses = config_tuple
converted_data = {}
try:
converted_data = device.config[UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config=config,
data=device_responses)
except Exception as e:
log.error(e)
to_send = {DEVICE_NAME_PARAMETER: converted_data[DEVICE_NAME_PARAMETER],
DEVICE_TYPE_PARAMETER: converted_data[DEVICE_TYPE_PARAMETER],
TELEMETRY_PARAMETER: [],
ATTRIBUTES_PARAMETER: []
}
if current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
for current_section_dict in converted_data[converted_data_section]:
for key, value in current_section_dict.items():
if device.config[LAST_PREFIX + converted_data_section].get(key) is None or \
device.config[LAST_PREFIX + converted_data_section][key] != value:
device.config[LAST_PREFIX + converted_data_section][key] = value
to_send[converted_data_section].append({key: value})
elif converted_data and current_device_config.get('sendDataOnlyOnChange') is None or \
not current_device_config.get('sendDataOnlyOnChange'):
self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1
for converted_data_section in CONVERTED_DATA_SECTIONS:
device.config[LAST_PREFIX + converted_data_section] = converted_data[
converted_data_section]
to_send[converted_data_section] = converted_data[converted_data_section]
if to_send.get(ATTRIBUTES_PARAMETER) or to_send.get(TELEMETRY_PARAMETER):
self.__gateway.send_to_storage(self.get_name(), to_send)
self.statistics[STATISTIC_MESSAGE_SENT_PARAMETER] += 1
    def close(self):
        """Stop polling, close master sockets, and shut down the local slave server."""
        self.__stopped = True
        self.__stop_connections_to_masters()
        if reactor.running:
            # Stops the pymodbus slave started in __configure_and_run_slave.
            StopServer()
        log.info('%s has been stopped.', self.get_name())
    def get_name(self):
        # The thread name set in __init__ doubles as the connector name.
        return self.name
def __process_slaves(self):
# TODO: write documentation
device = ModbusConnector.process_requests.get()
device_responses = {'timeseries': {}, 'attributes': {}}
current_device_config = {}
try:
for config_section in device_responses:
if device.config.get(config_section) is not None:
current_device_config = device.config
self.__connect_to_current_master(device)
if not device.config['master'].is_socket_open() or not len(
current_device_config[config_section]):
continue
# Reading data from device
for interested_data in range(len(current_device_config[config_section])):
current_data = current_device_config[config_section][interested_data]
current_data[DEVICE_NAME_PARAMETER] = device
input_data = self.__function_to_device(device, current_data)
device_responses[config_section][current_data[TAG_PARAMETER]] = {
"data_sent": current_data,
"input_data": input_data}
log.debug("Checking %s for device %s", config_section, device)
log.debug('Device response: ', device_responses)
if device_responses.get('timeseries') or device_responses.get('attributes'):
self.__convert_and_save_data((device, current_device_config, {
**current_device_config,
BYTE_ORDER_PARAMETER: current_device_config.get(BYTE_ORDER_PARAMETER,
device.byte_order),
WORD_ORDER_PARAMETER: current_device_config.get(WORD_ORDER_PARAMETER,
device.word_order)
}, device_responses))
except ConnectionException:
sleep(5)
log.error("Connection lost! Reconnecting...")
except Exception as e:
log.exception(e)
    def __connect_to_current_master(self, device=None):
        """Ensure ``device`` has a connected pymodbus master, with rate-limited retries."""
        # NOTE(review): unlike the two timing values below, connect_attempt_count
        # is never read from device.config ('connectAttemptCount'), which makes
        # the `< 1` clamp further down dead code - a config read appears to be
        # missing here; confirm against the documented connector options.
        connect_attempt_count = 5
        connect_attempt_time_ms = 100
        wait_after_failed_attempts_ms = 300000
        if device.config.get('master') is None:
            # First use of this device: build its master client lazily.
            device.config['master'], device.config['available_functions'] = self.__configure_master(device.config)
        if connect_attempt_count < 1:
            connect_attempt_count = 1
        connect_attempt_time_ms = device.config.get('connectAttemptTimeMs', connect_attempt_time_ms)
        if connect_attempt_time_ms < 500:
            connect_attempt_time_ms = 500
        wait_after_failed_attempts_ms = device.config.get('waitAfterFailedAttemptsMs', wait_after_failed_attempts_ms)
        if wait_after_failed_attempts_ms < 1000:
            wait_after_failed_attempts_ms = 1000
        current_time = time() * 1000  # wall-clock time in milliseconds
        if not device.config['master'].is_socket_open():
            # Once the attempt budget is exhausted, only reset the counter
            # after the cool-down period has elapsed.
            if device.config['connection_attempt'] >= connect_attempt_count and current_time - device.config[
                'last_connection_attempt_time'] >= wait_after_failed_attempts_ms:
                device.config['connection_attempt'] = 0

            while not device.config['master'].is_socket_open() \
                    and device.config['connection_attempt'] < connect_attempt_count \
                    and current_time - device.config.get('last_connection_attempt_time',
                                                         0) >= connect_attempt_time_ms:
                device.config['connection_attempt'] = device.config[
                                                          'connection_attempt'] + 1
                device.config['last_connection_attempt_time'] = current_time
                log.debug("Modbus trying connect to %s", device)
                device.config['master'].connect()

                if device.config['connection_attempt'] == connect_attempt_count:
                    log.warn("Maximum attempt count (%i) for device \"%s\" - encountered.", connect_attempt_count,
                             device)

        if device.config['connection_attempt'] >= 0 and device.config['master'].is_socket_open():
            # Connected: reset the retry state for the next outage.
            device.config['connection_attempt'] = 0
            device.config['last_connection_attempt_time'] = current_time
    @staticmethod
    def __configure_master(config):
        """Build the pymodbus master client for one slave's config.

        Returns a (client, available_functions) pair, where
        available_functions maps Modbus function codes to the client's
        bound read/write methods.
        """
        current_config = config
        current_config["rtu"] = FRAMER_TYPE[current_config['method']]

        if current_config.get('type') == 'tcp':
            master = ModbusTcpClient(current_config["host"],
                                     current_config["port"],
                                     current_config["rtu"],
                                     timeout=current_config["timeout"],
                                     retry_on_empty=current_config["retry_on_empty"],
                                     retry_on_invalid=current_config["retry_on_invalid"],
                                     retries=current_config["retries"])
        elif current_config.get(TYPE_PARAMETER) == 'udp':
            master = ModbusUdpClient(current_config["host"],
                                     current_config["port"],
                                     current_config["rtu"],
                                     timeout=current_config["timeout"],
                                     retry_on_empty=current_config["retry_on_empty"],
                                     retry_on_invalid=current_config["retry_on_invalid"],
                                     retries=current_config["retries"])
        elif current_config.get(TYPE_PARAMETER) == 'serial':
            master = ModbusSerialClient(method=current_config["method"],
                                        port=current_config["port"],
                                        timeout=current_config["timeout"],
                                        retry_on_empty=current_config["retry_on_empty"],
                                        retry_on_invalid=current_config["retry_on_invalid"],
                                        retries=current_config["retries"],
                                        baudrate=current_config["baudrate"],
                                        stopbits=current_config["stopbits"],
                                        bytesize=current_config["bytesize"],
                                        parity=current_config["parity"],
                                        strict=current_config["strict"])
        else:
            raise Exception("Invalid Modbus transport type.")

        # Modbus function code -> bound client method, used by
        # __function_to_device to dispatch reads and writes.
        available_functions = {
            1: master.read_coils,
            2: master.read_discrete_inputs,
            3: master.read_holding_registers,
            4: master.read_input_registers,
            5: master.write_coil,
            6: master.write_register,
            15: master.write_coils,
            16: master.write_registers,
        }
        return master, available_functions
def __stop_connections_to_masters(self):
for slave in self.__slaves:
if slave.config.get('master') is not None and slave.config.get('master').is_socket_open():
slave.config['master'].close()
@staticmethod
def __function_to_device(device, config):
function_code = config.get('functionCode')
result = None
if function_code == 1:
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))) * 8,
unit=device.config['unitId'])
elif function_code in (2, 3, 4):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
count=config.get(OBJECTS_COUNT_PARAMETER,
config.get("registersCount",
config.get(
"registerCount",
1))),
unit=device.config['unitId'])
elif function_code in (5, 15):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
value=config[PAYLOAD_PARAMETER],
unit=device.config['unitId'] * 8)
elif function_code in (6, 16):
result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER],
values=config[PAYLOAD_PARAMETER],
unit=device.config['unitId'])
else:
log.error("Unknown Modbus function with code: %s", function_code)
log.debug("With result %s", str(result))
if "Exception" in str(result):
log.exception(result)
return result
def on_attributes_update(self, content):
try:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
for attribute_updates_command_config in device.config['attributeUpdates']:
for attribute_updated in content[DATA_PARAMETER]:
if attribute_updates_command_config[TAG_PARAMETER] == attribute_updated:
to_process = {
DEVICE_SECTION_PARAMETER: content[DEVICE_SECTION_PARAMETER],
DATA_PARAMETER: {
RPC_METHOD_PARAMETER: attribute_updated,
RPC_PARAMS_PARAMETER: content[DATA_PARAMETER][attribute_updated]
}
}
attribute_updates_command_config['byteOrder'] = device.byte_order or 'LITTLE'
attribute_updates_command_config['wordOrder'] = device.word_order or 'LITTLE'
self.__process_request(to_process, attribute_updates_command_config,
request_type='attributeUpdates')
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, server_rpc_request):
try:
if server_rpc_request.get(DEVICE_SECTION_PARAMETER) is not None:
log.debug("Modbus connector received rpc request for %s with server_rpc_request: %s",
server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request)
device = tuple(
filter(
lambda slave: slave.name == server_rpc_request[DEVICE_SECTION_PARAMETER], self.__slaves
)
)[0]
if isinstance(device.config[RPC_SECTION], dict):
rpc_command_config = device.config[RPC_SECTION].get(
server_rpc_request[DATA_PARAMETER][RPC_METHOD_PARAMETER])
if rpc_command_config is not None:
self.__process_request(server_rpc_request, rpc_command_config)
elif isinstance(device.config[RPC_SECTION], list):
for rpc_command_config in device.config[RPC_SECTION]:
if rpc_command_config[TAG_PARAMETER] == server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]:
self.__process_request(server_rpc_request, rpc_command_config)
break
else:
log.error("Received rpc request, but method %s not found in config for %s.",
server_rpc_request[DATA_PARAMETER].get(RPC_METHOD_PARAMETER),
self.get_name())
self.__gateway.send_rpc_reply(server_rpc_request[DEVICE_SECTION_PARAMETER],
server_rpc_request[DATA_PARAMETER][RPC_ID_PARAMETER],
{server_rpc_request[DATA_PARAMETER][
RPC_METHOD_PARAMETER]: "METHOD NOT FOUND!"})
else:
log.debug("Received RPC to connector: %r", server_rpc_request)
except Exception as e:
log.exception(e)
def __process_request(self, content, rpc_command_config, request_type='RPC'):
log.debug('Processing %s request', request_type)
if rpc_command_config is not None:
device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0]
rpc_command_config[UNIT_ID_PARAMETER] = device.config['unitId']
rpc_command_config[BYTE_ORDER_PARAMETER] = device.config.get("byteOrder", "LITTLE")
rpc_command_config[WORD_ORDER_PARAMETER] = device.config.get("wordOrder", "LITTLE")
self.__connect_to_current_master(device)
if rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (6, 16):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
try:
rpc_command_config[PAYLOAD_PARAMETER] = converted_data[0]
except IndexError and TypeError:
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
elif rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (5, 15):
converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config,
content)
rpc_command_config[PAYLOAD_PARAMETER] = converted_data
try:
response = self.__function_to_device(device, rpc_command_config)
except Exception as e:
log.exception(e)
response = e
if isinstance(response, (ReadRegistersResponseBase, ReadBitsResponseBase)):
to_converter = {
RPC_SECTION: {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: {"data_sent": rpc_command_config,
"input_data": response}}}
response = device.config[
UPLINK_PREFIX + CONVERTER_PARAMETER].convert(
config={**device.config,
BYTE_ORDER_PARAMETER: device.byte_order,
WORD_ORDER_PARAMETER: device.word_order
},
data=to_converter)
log.debug("Received %s method: %s, result: %r", request_type,
content[DATA_PARAMETER][RPC_METHOD_PARAMETER],
response)
elif isinstance(response, (WriteMultipleRegistersResponse,
WriteMultipleCoilsResponse,
WriteSingleCoilResponse,
WriteSingleRegisterResponse)):
log.debug("Write %r", str(response))
response = {"success": True}
if content.get(RPC_ID_PARAMETER) or (
content.get(DATA_PARAMETER) is not None and content[DATA_PARAMETER].get(RPC_ID_PARAMETER)):
if isinstance(response, Exception):
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
{content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: str(response)})
else:
self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER],
content[DATA_PARAMETER][RPC_ID_PARAMETER],
response)
log.debug("%r", response)
| [((3326, 3335), 'queue.Queue', 'Queue', (['(-1)'], {}), '(-1)\n', (3331, 3335), False, 'from queue import Queue\n'), ((1008, 1056), 'thingsboard_gateway.tb_utility.tb_utility.TBUtility.install_package', 'TBUtility.install_package', (['"""pymodbus"""', '""">=2.3.0"""'], {}), "('pymodbus', '>=2.3.0')\n", (1033, 1056), False, 'from thingsboard_gateway.tb_utility.tb_utility import TBUtility\n'), ((1061, 1098), 'thingsboard_gateway.tb_utility.tb_utility.TBUtility.install_package', 'TBUtility.install_package', (['"""pyserial"""'], {}), "('pyserial')\n", (1086, 1098), False, 'from thingsboard_gateway.tb_utility.tb_utility import TBUtility\n'), ((1214, 1250), 'thingsboard_gateway.tb_utility.tb_utility.TBUtility.install_package', 'TBUtility.install_package', (['"""twisted"""'], {}), "('twisted')\n", (1239, 1250), False, 'from thingsboard_gateway.tb_utility.tb_utility import TBUtility\n'), ((24292, 24340), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Processing %s request"""', 'request_type'], {}), "('Processing %s request', request_type)\n", (24301, 24340), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((4103, 4225), 'threading.Thread', 'Thread', ([], {'target': 'self.__configure_and_run_slave', 'args': "(self.__config['slave'],)", 'daemon': '(True)', 'name': '"""Gateway as a slave"""'}), "(target=self.__configure_and_run_slave, args=(self.__config['slave'],\n ), daemon=True, name='Gateway as a slave')\n", (4109, 4225), False, 'from threading import Thread\n'), ((4936, 4946), 'time.sleep', 'sleep', (['(0.2)'], {}), '(0.2)\n', (4941, 4946), False, 'from time import sleep, time\n'), ((5090, 5118), 'pymodbus.device.ModbusDeviceIdentification', 'ModbusDeviceIdentification', ([], {}), '()\n', (5116, 5118), False, 'from pymodbus.device import ModbusDeviceIdentification\n'), ((5536, 5551), 'pymodbus.version.version.short', 'version.short', ([], {}), '()\n', (5549, 5551), False, 'from pymodbus.version import 
version\n'), ((5679, 5711), 'thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter.BytesModbusDownlinkConverter', 'BytesModbusDownlinkConverter', (['{}'], {}), '({})\n', (5707, 5711), False, 'from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter\n'), ((6521, 6550), 'pymodbus.datastore.ModbusSparseDataBlock', 'ModbusSparseDataBlock', (['values'], {}), '(values)\n', (6542, 6550), False, 'from pymodbus.datastore import ModbusSparseDataBlock\n'), ((7853, 7960), 'thingsboard_gateway.connectors.modbus.slave.Slave', 'Slave', ([], {}), "(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback':\n ModbusConnector.callback})\n", (7858, 7960), False, 'from thingsboard_gateway.connectors.modbus.slave import Slave\n'), ((10485, 10497), 'pymodbus.server.asynchronous.StopServer', 'StopServer', ([], {}), '()\n', (10495, 10497), False, 'from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer\n'), ((13671, 13677), 'time.time', 'time', ([], {}), '()\n', (13675, 13677), False, 'from time import sleep, time\n'), ((15354, 15625), 'pymodbus.client.sync.ModbusTcpClient', 'ModbusTcpClient', (["current_config['host']", "current_config['port']", "current_config['rtu']"], {'timeout': "current_config['timeout']", 'retry_on_empty': "current_config['retry_on_empty']", 'retry_on_invalid': "current_config['retry_on_invalid']", 'retries': "current_config['retries']"}), "(current_config['host'], current_config['port'],\n current_config['rtu'], timeout=current_config['timeout'],\n retry_on_empty=current_config['retry_on_empty'], retry_on_invalid=\n current_config['retry_on_invalid'], retries=current_config['retries'])\n", (15369, 15625), False, 'from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient\n'), ((20724, 20745), 'thingsboard_gateway.connectors.connector.log.exception', 'log.exception', (['result'], {}), '(result)\n', (20737, 
20745), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((27940, 27965), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""%r"""', 'response'], {}), "('%r', response)\n", (27949, 27965), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((4788, 4837), 'threading.Thread', 'Thread', ([], {'target': 'self.__process_slaves', 'daemon': '(True)'}), '(target=self.__process_slaves, daemon=True)\n', (4794, 4837), False, 'from threading import Thread\n'), ((6597, 6625), 'pymodbus.datastore.ModbusSlaveContext', 'ModbusSlaveContext', ([], {}), '(**blocks)\n', (6615, 6625), False, 'from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext\n'), ((8592, 8604), 'thingsboard_gateway.connectors.connector.log.error', 'log.error', (['e'], {}), '(e)\n', (8601, 8604), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((12662, 12670), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (12667, 12670), False, 'from time import sleep, time\n'), ((12683, 12728), 'thingsboard_gateway.connectors.connector.log.error', 'log.error', (['"""Connection lost! Reconnecting..."""'], {}), "('Connection lost! 
Reconnecting...')\n", (12692, 12728), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((12772, 12788), 'thingsboard_gateway.connectors.connector.log.exception', 'log.exception', (['e'], {}), '(e)\n', (12785, 12788), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((14570, 14618), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Modbus trying connect to %s"""', 'device'], {}), "('Modbus trying connect to %s', device)\n", (14579, 14618), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((15914, 16185), 'pymodbus.client.sync.ModbusUdpClient', 'ModbusUdpClient', (["current_config['host']", "current_config['port']", "current_config['rtu']"], {'timeout': "current_config['timeout']", 'retry_on_empty': "current_config['retry_on_empty']", 'retry_on_invalid': "current_config['retry_on_invalid']", 'retries': "current_config['retries']"}), "(current_config['host'], current_config['port'],\n current_config['rtu'], timeout=current_config['timeout'],\n retry_on_empty=current_config['retry_on_empty'], retry_on_invalid=\n current_config['retry_on_invalid'], retries=current_config['retries'])\n", (15929, 16185), False, 'from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient\n'), ((22011, 22027), 'thingsboard_gateway.connectors.connector.log.exception', 'log.exception', (['e'], {}), '(e)\n', (22024, 22027), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((22194, 22354), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Modbus connector received rpc request for %s with server_rpc_request: %s"""', 'server_rpc_request[DEVICE_SECTION_PARAMETER]', 'server_rpc_request'], {}), "(\n 'Modbus connector received rpc request for %s with server_rpc_request: %s',\n server_rpc_request[DEVICE_SECTION_PARAMETER], server_rpc_request)\n", (22203, 22354), False, 'from 
thingsboard_gateway.connectors.connector import Connector, log\n'), ((24078, 24140), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Received RPC to connector: %r"""', 'server_rpc_request'], {}), "('Received RPC to connector: %r', server_rpc_request)\n", (24087, 24140), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((24184, 24200), 'thingsboard_gateway.connectors.connector.log.exception', 'log.exception', (['e'], {}), '(e)\n', (24197, 24200), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((26619, 26742), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Received %s method: %s, result: %r"""', 'request_type', 'content[DATA_PARAMETER][RPC_METHOD_PARAMETER]', 'response'], {}), "('Received %s method: %s, result: %r', request_type, content[\n DATA_PARAMETER][RPC_METHOD_PARAMETER], response)\n", (26628, 26742), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((11874, 11936), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Checking %s for device %s"""', 'config_section', 'device'], {}), "('Checking %s for device %s', config_section, device)\n", (11883, 11936), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((11957, 12005), 'thingsboard_gateway.connectors.connector.log.debug', 'log.debug', (['"""Device response: """', 'device_responses'], {}), "('Device response: ', device_responses)\n", (11966, 12005), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((14771, 14875), 'thingsboard_gateway.connectors.connector.log.warn', 'log.warn', (['"""Maximum attempt count (%i) for device "%s" - encountered."""', 'connect_attempt_count', 'device'], {}), '(\'Maximum attempt count (%i) for device "%s" - encountered.\',\n connect_attempt_count, device)\n', (14779, 14875), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), 
((16477, 16934), 'pymodbus.client.sync.ModbusSerialClient', 'ModbusSerialClient', ([], {'method': "current_config['method']", 'port': "current_config['port']", 'timeout': "current_config['timeout']", 'retry_on_empty': "current_config['retry_on_empty']", 'retry_on_invalid': "current_config['retry_on_invalid']", 'retries': "current_config['retries']", 'baudrate': "current_config['baudrate']", 'stopbits': "current_config['stopbits']", 'bytesize': "current_config['bytesize']", 'parity': "current_config['parity']", 'strict': "current_config['strict']"}), "(method=current_config['method'], port=current_config[\n 'port'], timeout=current_config['timeout'], retry_on_empty=\n current_config['retry_on_empty'], retry_on_invalid=current_config[\n 'retry_on_invalid'], retries=current_config['retries'], baudrate=\n current_config['baudrate'], stopbits=current_config['stopbits'],\n bytesize=current_config['bytesize'], parity=current_config['parity'],\n strict=current_config['strict'])\n", (16495, 16934), False, 'from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient\n'), ((25852, 25868), 'thingsboard_gateway.connectors.connector.log.exception', 'log.exception', (['e'], {}), '(e)\n', (25865, 25868), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((20556, 20621), 'thingsboard_gateway.connectors.connector.log.error', 'log.error', (['"""Unknown Modbus function with code: %s"""', 'function_code'], {}), "('Unknown Modbus function with code: %s', function_code)\n", (20565, 20621), False, 'from thingsboard_gateway.connectors.connector import Connector, log\n'), ((3893, 3916), 'random.choice', 'choice', (['ascii_lowercase'], {}), '(ascii_lowercase)\n', (3899, 3916), False, 'from random import choice\n')] |
xwu20/wmg_agent | specs/test_gru_on_flat_babyai.py | 25378c8fc54eb6e0e8c9d969760a72e843572f09 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
### CONTROLS (non-tunable) ###
# general
TYPE_OF_RUN = test_episodes # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None
# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False
# A3cAgent
AGENT_NET = GRU_Network
# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True
### HYPERPARAMETERS (tunable) ###
# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.
# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096
| [] |
meisterT/rules_haskell | haskell/private/actions/runghc.bzl | 7c0a867fc23da104ea8cbff26864894abcf137bc | """runghc support"""
load(":private/context.bzl", "render_env")
load(":private/packages.bzl", "expose_packages", "pkg_info_to_compile_flags")
load(
":private/path_utils.bzl",
"link_libraries",
"ln",
"target_unique_name",
)
load(
":private/set.bzl",
"set",
)
load(":providers.bzl", "get_ghci_extra_libs")
load("@bazel_skylib//lib:shell.bzl", "shell")
def build_haskell_runghc(
hs,
runghc_wrapper,
user_compile_flags,
extra_args,
hs_info,
cc_info,
output,
package_databases,
version,
lib_info = None):
"""Build runghc script.
Args:
hs: Haskell context.
hs_info: HaskellInfo.
package_databases: package caches excluding the cache file of the package
we're creating a runghc for.
lib_info: If we're building runghc for a library target, pass
HaskellLibraryInfo here, otherwise it should be None.
Returns:
None.
"""
(pkg_info_inputs, args) = pkg_info_to_compile_flags(
hs,
pkg_info = expose_packages(
package_ids = hs.package_ids,
package_databases = package_databases,
version = version,
),
prefix = "runghc-",
)
if lib_info != None:
for idir in set.to_list(hs_info.import_dirs):
args += ["-i{0}".format(idir)]
(ghci_extra_libs, ghc_env) = get_ghci_extra_libs(
hs,
cc_info,
path_prefix = "$RULES_HASKELL_EXEC_ROOT",
)
link_libraries(ghci_extra_libs, args)
runghc_file = hs.actions.declare_file(target_unique_name(hs, "runghc"))
# Extra arguments.
# `compiler flags` is the default set of arguments for runghc,
# augmented by `extra_args`.
# The ordering is important, first compiler flags (from toolchain
# and local rule), then from `extra_args`. This way the more
# specific arguments are listed last, and then have more priority in
# GHC.
# Note that most flags for GHCI do have their negative value, so a
# negative flag in `extra_args` can disable a positive flag set
# in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable
# `-XOverloadedStrings`.
args += hs.toolchain.compiler_flags + user_compile_flags + hs.toolchain.repl_ghci_args
# ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc
runcompile_flags = ["--ghc-arg=%s" % a for a in args]
runcompile_flags += extra_args
hs.actions.expand_template(
template = runghc_wrapper,
output = runghc_file,
substitutions = {
"{ENV}": render_env(ghc_env),
"{TOOL}": hs.tools.runghc.path,
"{CC}": hs.toolchain.cc_wrapper.executable.path,
"{ARGS}": " ".join([shell.quote(a) for a in runcompile_flags]),
},
is_executable = True,
)
# XXX We create a symlink here because we need to force
# hs.tools.runghc and the best way to do that is
# to use hs.actions.run. That action, in turn must produce
# a result, so using ln seems to be the only sane choice.
extra_inputs = depset(transitive = [
depset([
hs.tools.runghc,
runghc_file,
]),
package_databases,
pkg_info_inputs,
ghci_extra_libs,
hs_info.source_files,
hs.toolchain.cc_wrapper.runfiles.files,
])
ln(hs, runghc_file, output, extra_inputs)
| [] |
pymedphys/pymedphys-archive-2019 | tests/dicom/test_header_tweaks.py | 6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
import numpy as np
import pydicom
from pymedphys._dicom.create import dicom_dataset_from_dict
from pymedphys._dicom.header import (
RED_adjustment_map_from_structure_names,
adjust_machine_name,
adjust_RED_by_structure_name,
adjust_rel_elec_density,
)
from pymedphys._dicom.utilities import remove_file
HERE = os.path.dirname(__file__)
ORIGINAL_DICOM_FILENAME = os.path.join(
HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4()))
)
ADJUSTED_DICOM_FILENAME = os.path.join(
HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4()))
)
def compare_dicom_cli(command, original, expected):
pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)
try:
subprocess.check_call(command)
cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
assert str(cli_adjusted_ds) == str(expected)
finally:
remove_file(ORIGINAL_DICOM_FILENAME)
remove_file(ADJUSTED_DICOM_FILENAME)
def test_adjust_machine_name():
new_name = "new_name"
original_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": "hello"},
{"TreatmentMachineName": "george"},
]
}
)
expected_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": new_name},
{"TreatmentMachineName": new_name},
]
}
)
adjusted_ds = adjust_machine_name(original_ds, new_name)
assert adjusted_ds != original_ds
assert adjusted_ds == expected_ds
command = "pymedphys dicom adjust-machine-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
new_name,
]
compare_dicom_cli(command, original_ds, expected_ds)
def test_electron_density_append():
adjustment_map = {
"to_be_changed 1": 1.0,
"to_be_changed 2": 0.5,
"to_be_changed 3": 1.5,
}
excess_adjustment_map = {**adjustment_map, **{"this_structure_doesnt_exist": 1.0}}
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{"ROINumber": 1, "ROIName": "to_be_changed 1"},
{"ROINumber": 2, "ROIName": "dont_change_me"},
{"ROINumber": 10, "ROIName": "to_be_changed 2"},
{"ROINumber": 99, "ROIName": "to_be_changed 3"},
],
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
}
],
},
{"ReferencedROINumber": 2},
{"ReferencedROINumber": 10},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": 0,
}
],
},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
},
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 1"
],
},
],
},
{"ReferencedROINumber": 2},
{
"ReferencedROINumber": 10,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 2"
],
}
],
},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 3"
],
}
],
},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_rel_elec_density(original_ds, adjustment_map)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
adjusted_with_excess_ds = adjust_rel_elec_density(
original_ds, excess_adjustment_map, ignore_missing_structure=True
)
assert adjusted_with_excess_ds != original_ds
assert str(expected_ds) == str(adjusted_with_excess_ds)
excess_adjustment_map_as_list = [
["{}".format(key), item] for key, item in excess_adjustment_map.items()
]
excess_adjustment_map_flat = np.concatenate(excess_adjustment_map_as_list).tolist()
command = (
"pymedphys dicom adjust-RED -i ".split()
+ [ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME]
+ excess_adjustment_map_flat
)
compare_dicom_cli(command, original_ds, expected_ds)
def test_structure_name_parse():
structure_names = [
"a RED=1",
"b",
"c",
"d RED=2.2",
"e red = 3",
"f",
"g Red: 4.7",
"h RED=0.5 ",
]
expected_adjustment_map = {
"a RED=1": 1,
"d RED=2.2": 2.2,
"e red = 3": 3,
"g Red: 4.7": 4.7,
"h RED=0.5 ": 0.5,
}
adjustment_map = RED_adjustment_map_from_structure_names(structure_names)
assert expected_adjustment_map == adjustment_map
def test_structure_name_based_RED_append():
electron_density_to_use = 0.5
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{
"ROINumber": 1,
"ROIName": "a_structure RED={}".format(electron_density_to_use),
},
{"ROINumber": 2, "ROIName": "dont_change_me"},
],
"RTROIObservationsSequence": [
{"ReferencedROINumber": 1},
{"ReferencedROINumber": 2},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": electron_density_to_use,
}
],
},
{"ReferencedROINumber": 2},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_RED_by_structure_name(original_ds)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
command = "pymedphys dicom adjust-RED-by-structure-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
]
compare_dicom_cli(command, original_ds, expected_ds)
| [((958, 983), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (973, 983), False, 'import os\n'), ((1256, 1309), 'pydicom.write_file', 'pydicom.write_file', (['ORIGINAL_DICOM_FILENAME', 'original'], {}), '(ORIGINAL_DICOM_FILENAME, original)\n', (1274, 1309), False, 'import pydicom\n'), ((1676, 1794), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'BeamSequence': [{'TreatmentMachineName': 'hello'}, {\n 'TreatmentMachineName': 'george'}]}"], {}), "({'BeamSequence': [{'TreatmentMachineName': 'hello'},\n {'TreatmentMachineName': 'george'}]})\n", (1699, 1794), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((1893, 2013), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'BeamSequence': [{'TreatmentMachineName': new_name}, {\n 'TreatmentMachineName': new_name}]}"], {}), "({'BeamSequence': [{'TreatmentMachineName': new_name\n }, {'TreatmentMachineName': new_name}]})\n", (1916, 2013), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((2111, 2153), 'pymedphys._dicom.header.adjust_machine_name', 'adjust_machine_name', (['original_ds', 'new_name'], {}), '(original_ds, new_name)\n', (2130, 2153), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((2714, 3361), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'StructureSetROISequence': [{'ROINumber': 1, 'ROIName': 'to_be_changed 1'},\n {'ROINumber': 2, 'ROIName': 'dont_change_me'}, {'ROINumber': 10,\n 'ROIName': 'to_be_changed 2'}, {'ROINumber': 99, 'ROIName':\n 'to_be_changed 3'}], 'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'EFFECTIVE_Z', 'ROIPhysicalPropertyValue': 6}]},\n {'ReferencedROINumber': 2}, {'ReferencedROINumber': 10}, {\n 'ReferencedROINumber': 99, 
'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': \n 0}]}]}"], {}), "({'StructureSetROISequence': [{'ROINumber': 1,\n 'ROIName': 'to_be_changed 1'}, {'ROINumber': 2, 'ROIName':\n 'dont_change_me'}, {'ROINumber': 10, 'ROIName': 'to_be_changed 2'}, {\n 'ROINumber': 99, 'ROIName': 'to_be_changed 3'}],\n 'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty': 'EFFECTIVE_Z',\n 'ROIPhysicalPropertyValue': 6}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': 0}]}]})\n", (2737, 3361), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((3926, 4667), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty': 'EFFECTIVE_Z',\n 'ROIPhysicalPropertyValue': 6}, {'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': adjustment_map[\n 'to_be_changed 1']}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 2']}]}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': adjustment_map[\n 'to_be_changed 3']}]}]}"], {'template_ds': 'original_ds'}), "({'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'EFFECTIVE_Z', 'ROIPhysicalPropertyValue': 6}, {\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 1']}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10, 
'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 2']}]}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': adjustment_map[\n 'to_be_changed 3']}]}]}, template_ds=original_ds)\n", (3949, 4667), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((5623, 5675), 'pymedphys._dicom.header.adjust_rel_elec_density', 'adjust_rel_elec_density', (['original_ds', 'adjustment_map'], {}), '(original_ds, adjustment_map)\n', (5646, 5675), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((5794, 5888), 'pymedphys._dicom.header.adjust_rel_elec_density', 'adjust_rel_elec_density', (['original_ds', 'excess_adjustment_map'], {'ignore_missing_structure': '(True)'}), '(original_ds, excess_adjustment_map,\n ignore_missing_structure=True)\n', (5817, 5888), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((6850, 6906), 'pymedphys._dicom.header.RED_adjustment_map_from_structure_names', 'RED_adjustment_map_from_structure_names', (['structure_names'], {}), '(structure_names)\n', (6889, 6906), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((7553, 7842), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': electron_density_to_use\n }]}, {'ReferencedROINumber': 2}]}"], {'template_ds': 'original_ds'}), "({'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 
1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n electron_density_to_use}]}, {'ReferencedROINumber': 2}]}, template_ds=\n original_ds)\n", (7576, 7842), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((8123, 8164), 'pymedphys._dicom.header.adjust_RED_by_structure_name', 'adjust_RED_by_structure_name', (['original_ds'], {}), '(original_ds)\n', (8151, 8164), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((1328, 1358), 'subprocess.check_call', 'subprocess.check_call', (['command'], {}), '(command)\n', (1349, 1358), False, 'import subprocess\n'), ((1385, 1439), 'pydicom.read_file', 'pydicom.read_file', (['ADJUSTED_DICOM_FILENAME'], {'force': '(True)'}), '(ADJUSTED_DICOM_FILENAME, force=True)\n', (1402, 1439), False, 'import pydicom\n'), ((1515, 1551), 'pymedphys._dicom.utilities.remove_file', 'remove_file', (['ORIGINAL_DICOM_FILENAME'], {}), '(ORIGINAL_DICOM_FILENAME)\n', (1526, 1551), False, 'from pymedphys._dicom.utilities import remove_file\n'), ((1560, 1596), 'pymedphys._dicom.utilities.remove_file', 'remove_file', (['ADJUSTED_DICOM_FILENAME'], {}), '(ADJUSTED_DICOM_FILENAME)\n', (1571, 1596), False, 'from pymedphys._dicom.utilities import remove_file\n'), ((1074, 1086), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1084, 1086), False, 'import uuid\n'), ((1181, 1193), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1191, 1193), False, 'import uuid\n'), ((6168, 6213), 'numpy.concatenate', 'np.concatenate', (['excess_adjustment_map_as_list'], {}), '(excess_adjustment_map_as_list)\n', (6182, 6213), True, 'import numpy as np\n')] |
itewk/home-assistant | tests/components/http/test_data_validator.py | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | """Test data validator decorator."""
from unittest.mock import Mock
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
async def get_client(aiohttp_client, validator):
    """Generate a client that hits a view decorated with validator.

    Builds a minimal aiohttp app with a single HomeAssistantView whose
    ``post`` handler is wrapped by *validator*, and returns a test client
    bound to that app.
    """
    app = web.Application()
    # HomeAssistantView.register expects a hass object on the app.
    app["hass"] = Mock(is_running=True)
    class TestView(HomeAssistantView):
        # Route metadata; auth is disabled so the tests can POST freely.
        url = "/"
        name = "test"
        requires_auth = False
        @validator
        async def post(self, request, data):
            """Test method: return an empty body; *data* is the validated payload."""
            return b""
    TestView().register(app, app.router)
    client = await aiohttp_client(app)
    return client
async def test_validator(aiohttp_client):
    """A required string field: wrong type or missing body yields HTTP 400."""
    schema = vol.Schema({vol.Required("test"): str})
    client = await get_client(aiohttp_client, RequestDataValidator(schema))
    # Valid payload passes validation.
    response = await client.post("/", json={"test": "bla"})
    assert response.status == 200
    # Integer where a string is required.
    response = await client.post("/", json={"test": 100})
    assert response.status == 400
    # Missing body entirely.
    response = await client.post("/")
    assert response.status == 400
async def test_validator_allow_empty(aiohttp_client):
    """With allow_empty=True a missing body validates as an empty dict."""
    # "test" is optional, so an empty payload must still satisfy the schema.
    schema = vol.Schema({vol.Optional("test"): str})
    validator = RequestDataValidator(schema, allow_empty=True)
    client = await get_client(aiohttp_client, validator)
    response = await client.post("/", json={"test": "bla"})
    assert response.status == 200
    # A present-but-invalid value is still rejected.
    response = await client.post("/", json={"test": 100})
    assert response.status == 400
    # No body at all is accepted because of allow_empty.
    response = await client.post("/")
    assert response.status == 200
| [((389, 406), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (404, 406), False, 'from aiohttp import web\n'), ((425, 446), 'unittest.mock.Mock', 'Mock', ([], {'is_running': '(True)'}), '(is_running=True)\n', (429, 446), False, 'from unittest.mock import Mock\n'), ((937, 957), 'voluptuous.Required', 'vol.Required', (['"""test"""'], {}), "('test')\n", (949, 957), True, 'import voluptuous as vol\n'), ((1586, 1606), 'voluptuous.Optional', 'vol.Optional', (['"""test"""'], {}), "('test')\n", (1598, 1606), True, 'import voluptuous as vol\n')] |
sam-aldis/Conversley | Conversely_Frontend/app/Server/ukjp/templates.py | 1fc30d6b768cc03f727229a52e0879fac3af1e3a | import days
# Conversation stages tracked per user.
STAGE_INIT = 0
STAGE_CHALLENGE_INIT = 1
STAGE_BOOKED = 2

def createJSONTemplate(data):
    # Stub: JSON template construction is not implemented yet.
    pass

# Opening messages sent in order when a new enquiry arrives.
messages = [
    "Hey {{first_name}}, thankyou for your enquiry to be one of our Transformation Challengers",
    "We have 2 Challenges available for you:\n\nThe 8 Week Bikini Challenge which helps you shed 3-9kg of unwanted body fat, flattens your tummy and tones your arms, abs, legs and butt.\n\nOr our 9in6 Challenge which helps you drop 9+kgs of pure fat in just 6 Weeks.",
    "Please choose which challenge information you would like below..."
]
# Scripted reply sequences keyed by postback payload.  Each entry is a list of
# steps: "message" steps send the given text, "file" steps send a URL, and
# "json" steps reference a key returned by build_json_templates().
callbacks = {
    "INIT_8WBC" : [
        {
            "type": "message",
            "text" : "Thank you {{first_name}},\n\
The FREE 8 Week Bikini Challenge is a done for you - step by step PROVEN program that helps you lose the 3-7kg of unwanted body fat, flatten your tummy and tone your arms, legs and butt.\n\
\n\
This is your chance to transform your body in just 8 weeks for FREE"
        },
        {
            "type" : "message",
            "text" : "In exchange for the program being FREE....we ask that you allow us to share your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. \n\
(Please note, a small refundable deposit applies to keep you motivated throughout the 8 weeks)"
        },
        {
            "type": "message",
            "text": "The challenge is starting Monday 12th of June and to start your 8 Week Bikini Challenge, we just require you to attend the upcoming information meeting at the facility to quickly go over the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join. Simply a meet and chat.\n\
\n\
To RSVP to the meeting click a suitable date below"
        },
        {
            "type" : "json",
            "template" : "init_8wbc"
        }
    ],
    "INIT_9IN6" : [
        {
            "type" : "message",
            "text" : "Thank you {{first_name}},\n\
The 9in6 Transformation Challenge is a done for you - step by step PROVEN program that helps you lose 9kg kilos of unwanted body fat, flatten your tummy and tone your arms, legs and butt in just 6 weeks.\n\
\
\nThis is your chance to transform your body in just 6 weeks for FREE!"
        },
        {
            "type" : "message",
            "text" : "In exchange for the program, we ask that you allow us to showcase your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. When you complete the program its FREE. \n\
Please note, a small refundable \"incentive deposit\" applies to keep you motivated throughout the 6 weeks."
        },
        {
            "type" : "message",
            "text" : "The challenge is starting Monday 12th of June and to start your 9kg 6-week challenge, we require you to attend the upcoming information meeting where we explain the program in person. \n\
\n\
There is absolutely no high pressure sales or obligation to join at the end, just an opportunity for you learn about the program and how you can lose 9kg in 6 weeks for FREE\n\
\n\
To RSVP to the meeting click a suitable date below"
        },
        {
            "type" : "json",
            "template" : "init_9in6"
        }
    ],
    "TIME_TABLE_8WBC" : [
        {
            "type" : "message",
            "text" : "Sure here's our lesson time table.."
        },
        {
            "type" : "file",
            "url" : "http://thetransformationcentre.com.au/img/timetable.pdf"
        },
        {
            "type" : "json",
            "template" : "init_8wbc"
        }
    ]
}
def build_json_templates():
    """Return the Messenger "generic" message templates keyed by name.

    Keys:
        "init"      -- challenge chooser shown on first contact
        "init_8wbc" -- RSVP button card for the 8 Week Bikini Challenge meeting
        "init_9in6" -- RSVP button card for the 9in6 Challenge meeting
    """
    # Both RSVP cards currently share one hard-coded meeting slot/payload.
    meeting_button = {
        "type": "postback",
        "title": "Sat 10th June 09.45",
        "payload": "BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945"
    }
    chooser_card = {
        "title": "The Transformation Centre",
        "image_url": "http://thetransformationcentre.com.au/img/spinner/1.png",
        "subtitle": "Choose one of our Challenges below",
        "buttons": [
            {
                "type": "postback",
                "payload": "INIT_8WBC",
                "title": "8 Week Bikini Challenge"
            },
            {
                "type": "postback",
                "title": "9kg 6 Week Challenge",
                "payload": "INIT_9IN6"
            }
        ]
    }
    rsvp_8wbc_card = {
        "title": "8 Week Bikini Challenge Meeting",
        "subtitle": "RSVP by clicking a suitable data below",
        "buttons": [dict(meeting_button)]
    }
    rsvp_9in6_card = {
        "title": "9kg 6 Week Challenge Info Meeting",
        "subtitle": "RSVP by clicking a suitable date below",
        "buttons": [dict(meeting_button)]
    }
    return {
        "init": {"template_type": "generic", "elements": [chooser_card]},
        "init_8wbc": {"template_type": "generic", "elements": [rsvp_8wbc_card]},
        "init_9in6": {"template_type": "generic", "elements": [rsvp_9in6_card]}
    }
jeanbez/spack | var/spack/repos/builtin/packages/pagmo2/package.py | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Pagmo2(CMakePackage):
    """Parallel Global Multiobjective Optimizer (and its Python alter ego
    PyGMO) is a C++ / Python platform to perform parallel computations of
    optimisation tasks (global and local) via the asynchronous generalized
    island model."""
    # Spack fetch locations.
    homepage = "https://esa.github.io/pagmo2/"
    url = "https://github.com/esa/pagmo2/archive/v2.18.0.tar.gz"
    git = "https://github.com/esa/pagmo2.git"
    maintainers = ['liuyangzhuan']
    # Known versions; 'master' tracks the upstream branch.
    version('master', branch='master')
    version('2.18.0', sha256='5ad40bf3aa91857a808d6b632d9e1020341a33f1a4115d7a2b78b78fd063ae31')
    # Runtime/build dependencies.
    depends_on('boost+system+serialization+thread')
    depends_on('intel-tbb')
    depends_on('mpi')
    depends_on('cmake@3.1:', type='build')
    variant('shared', default=True, description='Build shared libraries')
    def cmake_args(self):
        """Compose CMake arguments: MPI compiler wrappers plus the shared-libs toggle."""
        spec = self.spec
        args = [
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
        ]
        return args
| [] |
earthobservatory/ariamh-pub | interferogram/sentinel/fetchCalES.py | f33731e127f38ff33b02e02c07b16793c07651a6 | #!/usr/bin/env python3
import os, sys, re, json, requests, datetime, tarfile, argparse
from pprint import pprint
import numpy as np
from utils.UrlUtils import UrlUtils
# Base URL of the Copernicus quality-control service (kept for reference).
server = 'https://qc.sentinel1.eo.esa.int/'
# Matches Sentinel-1 auxiliary calibration identifiers, e.g. "S1A_AUX_CAL".
cal_re = re.compile(r'S1\w_AUX_CAL')
def cmdLineParse():
    """Build and run the command line parser.

    Returns an argparse.Namespace with:
        outdir  -- destination directory for downloads (default ".")
        dry_run -- when True, only print the URLs instead of downloading
    """
    cli = argparse.ArgumentParser(
        description='Fetch calibration auxiliary files ingested into HySDS')
    cli.add_argument('-o', '--output', dest='outdir', default='.', type=str,
                     help='Path to output directory')
    cli.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
                     help="Don't download anything; just output the URLs")
    return cli.parse_args()
def download_file(url, outdir='.', session=None):
    """Stream *url* into *outdir* as '<basename>.tgz' and return the local path.

    An existing requests session may be supplied for connection reuse;
    otherwise a throwaway session is created.  NOTE(review): verify=False
    disables SSL certificate checks -- presumably because of the QC server's
    certificate; confirm before changing.
    """
    sess = requests.session() if session is None else session
    dest = "%s.tgz" % os.path.join(outdir, os.path.basename(url))
    print('Downloading URL: ', url)
    resp = sess.get(url, stream=True, verify=False)
    resp.raise_for_status()
    with open(dest, 'wb') as fh:
        for block in resp.iter_content(chunk_size=1024):
            # iter_content may yield keep-alive empty chunks; skip those.
            if block:
                fh.write(block)
                fh.flush()
    return dest
def untar_file(path, outdir):
    """Extract the tar archive at *path* into *outdir*.

    Raises RuntimeError when *path* is not a tar archive.  Returns None.
    """
    if not tarfile.is_tarfile(path):
        raise RuntimeError("%s is not a tarfile." % path)
    with tarfile.open(path) as archive:
        archive.extractall(outdir)
def get_active_ids(es_url):
    """Query for the active calibration IDs."""
    # Fetch the single sentinel document "S1_AUX_CAL_ACTIVE", newest first.
    query = {
        "query":{
            "bool":{
                "must":[
                    {"term":{"_id": "S1_AUX_CAL_ACTIVE"}},
                ]
            }
        },
        "sort":[ { "starttime": { "order": "desc" } } ]
    }
    es_index = "grq_*_s1-aux_cal_active"
    # Join base URL and index without doubling the slash.
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code == 200:
        result = r.json()
        #pprint(result)
        total = result['hits']['total']
        if total == 0:
            raise RuntimeError("Failed to find S1_AUX_CAL_ACTIVE at %s." % search_url)
        # The newest hit carries the list of active calibration ids.
        return result['hits']['hits'][0]['_source']['metadata']['active_ids']
    else:
        # Dump query and response to stderr before raising the HTTP error.
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
def get_cal_url(id, es_url):
    """Return the download URL for one active calibration product.

    Queries the "grq_*_s1-aux_cal" index for the document with the given
    *id* and joins its HTTP base URL with the archive filename stored in
    the metadata.

    Raises RuntimeError when the id is not found; raises an HTTPError via
    raise_for_status() on a failed query.
    """
    query = {
        "query":{
            "bool":{
                "must":[
                    {"term":{"_id": id}},
                ]
            }
        },
        "fields": ["urls", "metadata.archive_filename"]
    }
    es_index = "grq_*_s1-aux_cal"
    # Join base URL and index without doubling the slash.
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code == 200:
        result = r.json()
        # (leftover debug pprint of the full ES response removed)
        total = result['hits']['total']
        if total == 0:
            raise RuntimeError("Failed to find %s at %s." % (id, search_url))
        urls = result['hits']['hits'][0]['fields']['urls']
        archive_fname = result['hits']['hits'][0]['fields']['metadata.archive_filename'][0]
        # Prefer the HTTP mirror among the dataset's published URLs.
        url = [x for x in urls if x.startswith('http')][0]
        return os.path.join(url, archive_fname)
    else:
        # Dump query and response to stderr before raising the HTTP error.
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()
def fetch(outdir, dry_run):
    """Fetch the active S1 AUX_CAL archives into *outdir*.

    When *dry_run* is True the resolved URLs are printed instead of
    downloaded.  Each downloaded tarball is extracted into *outdir* and
    then deleted.
    """
    # get endpoint configurations
    uu = UrlUtils()
    es_url = uu.rest_url
    # get active calibration ids
    active_ids = get_active_ids(es_url)
    print(active_ids)
    # get urls for active calibration files
    cal_urls = [get_cal_url(i, es_url) for i in active_ids]
    print(cal_urls)
    if len(cal_urls) == 0:
        print('Failed to find calibration auxiliary files')
    if dry_run: print('\n'.join(cal_urls))
    else:
        if not os.path.isdir(outdir): os.makedirs(outdir)
        for cal_url in cal_urls:
            try:
                cal_file = download_file(cal_url, outdir)
            except Exception:
                print('Failed to download URL: ', cal_url)
                raise
            try:
                # untar_file extracts in place and returns None
                # (the original bound that None to an unused variable).
                untar_file(cal_file, outdir)
            except Exception:
                print('Failed to untar: ', cal_file)
                raise
            os.unlink(cal_file)
if __name__ == '__main__':
    # CLI entry point: parse arguments, then fetch the active AUX_CAL files.
    inps = cmdLineParse()
    fetch(inps.outdir, inps.dry_run)
| [((225, 252), 're.compile', 're.compile', (['"""S1\\\\w_AUX_CAL"""'], {}), "('S1\\\\w_AUX_CAL')\n", (235, 252), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((329, 426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fetch calibration auxiliary files ingested into HySDS"""'}), "(description=\n 'Fetch calibration auxiliary files ingested into HySDS')\n", (352, 426), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((4057, 4067), 'utils.UrlUtils.UrlUtils', 'UrlUtils', ([], {}), '()\n', (4065, 4067), False, 'from utils.UrlUtils import UrlUtils\n'), ((878, 896), 'requests.session', 'requests.session', ([], {}), '()\n', (894, 896), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((1362, 1386), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['path'], {}), '(path)\n', (1380, 1386), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((1455, 1473), 'tarfile.open', 'tarfile.open', (['path'], {}), '(path)\n', (1467, 1473), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((3252, 3266), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (3258, 3266), False, 'from pprint import pprint\n'), ((3704, 3736), 'os.path.join', 'os.path.join', (['url', 'archive_fname'], {}), '(url, archive_fname)\n', (3716, 3736), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((941, 962), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (957, 962), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((2065, 2082), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (2075, 2082), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((3170, 3187), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (3180, 3187), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((4473, 4494), 
'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (4486, 4494), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((4496, 4515), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (4507, 4515), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((4872, 4891), 'os.unlink', 'os.unlink', (['cal_file'], {}), '(cal_file)\n', (4881, 4891), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((2506, 2533), 'json.dumps', 'json.dumps', (['query'], {'indent': '(2)'}), '(query, indent=2)\n', (2516, 2533), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n'), ((3852, 3879), 'json.dumps', 'json.dumps', (['query'], {'indent': '(2)'}), '(query, indent=2)\n', (3862, 3879), False, 'import os, sys, re, json, requests, datetime, tarfile, argparse\n')] |
stain/conservancy-website | www/conservancy/urls.py | 9e41ddff766fe517a99198d60701193e8b68415e | # Copyright 2005-2008, James Garrison
# Copyright 2010, 2012 Bradley M. Kuhn
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url, include
from django.contrib import admin, admindocs
from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views
# Register any admin modules declared by the installed apps.
admin.autodiscover()

# URL routing table for the Conservancy site.
urlpatterns = [
    url(r'^$', frontpage.view),
    url(r'^sponsors$', frontpage.view),
    url(r'^sponsors/$', sponsors.view),
    url(r'^sponsors/index.html$', sponsors.view),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', admin.site.urls),
    # Syndication feeds.
    url(r'^feeds/blog/?$', feeds.BlogFeed()),
    url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
    url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
    url(r'^feeds/?$', feeds.view),
    url(r'^news(/|$)', include('conservancy.apps.news.urls')),
    url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
    # formerly static templated things... (dirs with templates)
    url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
    url(r'^error', static_views.index),
    url(r'^about', static_views.index),
    url(r'^donate', static_views.index),
    url(r'^copyleft-compliance', static_views.index,
        {'fundraiser_sought' : 'vmware-match-0'}),
    url(r'^projects', static_views.index),
    url(r'^npoacct', static_views.index,
        {'fundraiser_sought' : 'npoacct'}),
    url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
    url(r'^overview', static_views.index),
    url(r'^privacy-policy', static_views.index),
    url(r'^supporter', include('conservancy.apps.supporter.urls')),
    url(r'^fundraiser_data', fundgoal_views.view),
]
| [((1161, 1181), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (1179, 1181), False, 'from django.contrib import admin, admindocs\n'), ((1203, 1228), 'django.conf.urls.url', 'url', (['"""^$"""', 'frontpage.view'], {}), "('^$', frontpage.view)\n", (1206, 1228), False, 'from django.conf.urls import url, include\n'), ((1235, 1268), 'django.conf.urls.url', 'url', (['"""^sponsors$"""', 'frontpage.view'], {}), "('^sponsors$', frontpage.view)\n", (1238, 1268), False, 'from django.conf.urls import url, include\n'), ((1275, 1308), 'django.conf.urls.url', 'url', (['"""^sponsors/$"""', 'sponsors.view'], {}), "('^sponsors/$', sponsors.view)\n", (1278, 1308), False, 'from django.conf.urls import url, include\n'), ((1315, 1358), 'django.conf.urls.url', 'url', (['"""^sponsors/index.html$"""', 'sponsors.view'], {}), "('^sponsors/index.html$', sponsors.view)\n", (1318, 1358), False, 'from django.conf.urls import url, include\n'), ((1432, 1463), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (1435, 1463), False, 'from django.conf.urls import url, include\n'), ((1622, 1650), 'django.conf.urls.url', 'url', (['"""^feeds/?$"""', 'feeds.view'], {}), "('^feeds/?$', feeds.view)\n", (1625, 1650), False, 'from django.conf.urls import url, include\n'), ((1847, 1917), 'django.conf.urls.url', 'url', (['"""^error/(40[134]|500)(?:/index\\\\.html|/|)$"""', 'static_views.handler'], {}), "('^error/(40[134]|500)(?:/index\\\\.html|/|)$', static_views.handler)\n", (1850, 1917), False, 'from django.conf.urls import url, include\n'), ((1923, 1956), 'django.conf.urls.url', 'url', (['"""^error"""', 'static_views.index'], {}), "('^error', static_views.index)\n", (1926, 1956), False, 'from django.conf.urls import url, include\n'), ((1963, 1996), 'django.conf.urls.url', 'url', (['"""^about"""', 'static_views.index'], {}), "('^about', static_views.index)\n", (1966, 1996), False, 'from django.conf.urls import url, 
include\n'), ((2003, 2037), 'django.conf.urls.url', 'url', (['"""^donate"""', 'static_views.index'], {}), "('^donate', static_views.index)\n", (2006, 2037), False, 'from django.conf.urls import url, include\n'), ((2044, 2136), 'django.conf.urls.url', 'url', (['"""^copyleft-compliance"""', 'static_views.index', "{'fundraiser_sought': 'vmware-match-0'}"], {}), "('^copyleft-compliance', static_views.index, {'fundraiser_sought':\n 'vmware-match-0'})\n", (2047, 2136), False, 'from django.conf.urls import url, include\n'), ((2167, 2203), 'django.conf.urls.url', 'url', (['"""^projects"""', 'static_views.index'], {}), "('^projects', static_views.index)\n", (2170, 2203), False, 'from django.conf.urls import url, include\n'), ((2210, 2279), 'django.conf.urls.url', 'url', (['"""^npoacct"""', 'static_views.index', "{'fundraiser_sought': 'npoacct'}"], {}), "('^npoacct', static_views.index, {'fundraiser_sought': 'npoacct'})\n", (2213, 2279), False, 'from django.conf.urls import url, include\n'), ((2381, 2417), 'django.conf.urls.url', 'url', (['"""^overview"""', 'static_views.index'], {}), "('^overview', static_views.index)\n", (2384, 2417), False, 'from django.conf.urls import url, include\n'), ((2424, 2466), 'django.conf.urls.url', 'url', (['"""^privacy-policy"""', 'static_views.index'], {}), "('^privacy-policy', static_views.index)\n", (2427, 2466), False, 'from django.conf.urls import url, include\n'), ((2541, 2585), 'django.conf.urls.url', 'url', (['"""^fundraiser_data"""', 'fundgoal_views.view'], {}), "('^fundraiser_data', fundgoal_views.view)\n", (2544, 2585), False, 'from django.conf.urls import url, include\n'), ((1385, 1425), 'django.conf.urls.include', 'include', (['"""django.contrib.admindocs.urls"""'], {}), "('django.contrib.admindocs.urls')\n", (1392, 1425), False, 'from django.conf.urls import url, include\n'), ((1493, 1509), 'conservancy.feeds.BlogFeed', 'feeds.BlogFeed', ([], {}), '()\n', (1507, 1509), False, 'from conservancy import feeds, frontpage, 
sponsors\n'), ((1539, 1563), 'conservancy.feeds.PressReleaseFeed', 'feeds.PressReleaseFeed', ([], {}), '()\n', (1561, 1563), False, 'from conservancy import feeds, frontpage, sponsors\n'), ((1596, 1615), 'conservancy.feeds.OmnibusFeed', 'feeds.OmnibusFeed', ([], {}), '()\n', (1613, 1615), False, 'from conservancy import feeds, frontpage, sponsors\n'), ((1676, 1713), 'django.conf.urls.include', 'include', (['"""conservancy.apps.news.urls"""'], {}), "('conservancy.apps.news.urls')\n", (1683, 1713), False, 'from django.conf.urls import url, include\n'), ((1739, 1776), 'django.conf.urls.include', 'include', (['"""conservancy.apps.blog.urls"""'], {}), "('conservancy.apps.blog.urls')\n", (1746, 1776), False, 'from django.conf.urls import url, include\n'), ((2328, 2374), 'django.conf.urls.include', 'include', (['"""conservancy.apps.contractpatch.urls"""'], {}), "('conservancy.apps.contractpatch.urls')\n", (2335, 2374), False, 'from django.conf.urls import url, include\n'), ((2492, 2534), 'django.conf.urls.include', 'include', (['"""conservancy.apps.supporter.urls"""'], {}), "('conservancy.apps.supporter.urls')\n", (2499, 2534), False, 'from django.conf.urls import url, include\n')] |
FabienArcellier/spike-graphene-flask | graphene_spike_tests/acceptances/test_query.py | bc7bce571a21826c3da852eb1c2e1904bbab99b4 | import unittest
from unittest.mock import Mock
from graphene import Schema
from graphene.test import Client
from graphene_spike.query import Query
class MainTest(unittest.TestCase):
    """Acceptance tests for the GraphQL `hello` and `goodbye` queries."""

    def setUp(self):
        # Fresh schema and client per test.  (The original also bound the
        # client to a redundant local alias, which served no purpose.)
        self.schema = Schema(query=Query)
        self.client = Client(self.schema)

    def test_hello_should_work_without_argument(self):
        """`hello` falls back to name="stranger" and age=18."""
        executed = self.client.execute('{ hello }')
        self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 18 !"})

    def test_hello_should_write_the_giving_name(self):
        """`hello` echoes an explicit name argument."""
        executed = self.client.execute('{ hello(name: "Fabien") }')
        self.assertEqual(executed['data'], {"hello": "Hello Fabien, you have 18 !"})

    def test_hello_should_write_the_giving_age(self):
        """`hello` echoes an explicit age argument."""
        executed = self.client.execute('{ hello(age: 24) }')
        self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 24 !"})

    def test_goodbye_should_giving_a_response(self):
        """`goodbye` returns its fixed farewell."""
        executed = self.client.execute('{ goodbye }')
        self.assertEqual(executed['data'], {"goodbye": "See ya!"})
| [((229, 248), 'graphene.Schema', 'Schema', ([], {'query': 'Query'}), '(query=Query)\n', (235, 248), False, 'from graphene import Schema\n'), ((280, 299), 'graphene.test.Client', 'Client', (['self.schema'], {}), '(self.schema)\n', (286, 299), False, 'from graphene.test import Client\n')] |
davidventasmarin/clikan | clikan.py | 401fe4053a14873872bb246739d55c55f8f6dcfa | from rich import print
from rich.console import Console
from rich.table import Table
import click
from click_default_group import DefaultGroup
import yaml
import os
##from terminaltables import SingleTable
import sys
from textwrap import wrap
import collections
import datetime
import configparser
import pkg_resources # part of setuptools
# Installed package version, resolved from package metadata at import time.
VERSION = pkg_resources.require("clikan")[0].version
class Config(object):
    """Holds the command-alias table loaded from an ini file."""

    def __init__(self):
        # Remember the working directory; aliases start out empty.
        self.path = os.getcwd()
        self.aliases = {}

    def read_config(self, filename):
        """Merge entries from the [aliases] section of *filename*."""
        ini = configparser.RawConfigParser()
        ini.read([filename])
        try:
            self.aliases.update(ini.items('aliases'))
        except configparser.NoSectionError:
            pass  # file missing or has no [aliases] section -- nothing to merge
# Decorator that injects the shared Config instance into click callbacks.
pass_config = click.make_pass_decorator(Config, ensure=True)
class AliasedGroup(DefaultGroup):
    """This subclass of a group supports looking up aliases in a config
    file and with a bit of magic.
    """
    def get_command(self, ctx, cmd_name):
        # Step one: builtin commands as normal
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv
        # Step two: find the config object and ensure it's there. This
        # will create the config object if it is missing.
        cfg = ctx.ensure_object(Config)
        # Step three: look up an explicit command alias in the config
        if cmd_name in cfg.aliases:
            actual_cmd = cfg.aliases[cmd_name]
            return click.Group.get_command(self, ctx, actual_cmd)
        # Alternative option: if we did not find an explicit alias we
        # allow automatic abbreviation of the command. "status" for
        # instance will match "st". We only allow that however if
        # there is only one command.
        matches = [x for x in self.list_commands(ctx)
                   if x.lower().startswith(cmd_name.lower())]
        if not matches:
            return None
        elif len(matches) == 1:
            return click.Group.get_command(self, ctx, matches[0])
        # More than one prefix match is ambiguous -- abort with an error.
        ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
def read_config(ctx, param, value):
    """--config option callback: eagerly load the alias config onto the context.

    Loading here (rather than in a command body) keeps aliases available
    even when the group callback itself never executes.  Returns the path
    actually read so click stores it as the option's value.
    """
    config = ctx.ensure_object(Config)
    # Fall back to the bundled aliases.ini next to this module.
    path = value if value is not None else os.path.join(
        os.path.dirname(__file__), 'aliases.ini')
    config.read_config(path)
    return path
# Root click group: defaults to the `show` subcommand when invoked bare.
@click.version_option(VERSION)
@click.command(cls=AliasedGroup, default='show', default_if_no_args=True)
def clikan():
    """clikan: CLI personal kanban """
@clikan.command()
def configure():
    """Place default config file in CLIKAN_HOME or HOME"""
    home = get_clikan_home()
    data_path = os.path.join(home, ".clikan.dat")
    config_path = os.path.join(home, ".clikan.yaml")
    # Never clobber an existing config without explicit confirmation.
    if (os.path.exists(config_path) and not
            click.confirm('Config file exists. Do you want to overwrite?')):
        return
    with open(config_path, 'w') as outfile:
        # Config only records where the task data file lives.
        conf = {'clikan_data': data_path}
        yaml.dump(conf, outfile, default_flow_style=False)
        click.echo("Creating %s" % config_path)
@clikan.command()
@click.argument('task')
def add(task):
    """Add a task in todo"""
    if len(task) > 40:
        click.echo('Task must be shorter than 40 chars. Brevity counts.')
    else:
        config = read_config_yaml()
        dd = read_data(config)
        todos, inprogs, dones = split_items(config, dd)
        # Optional cap on the todo column via config['limits']['todo'].
        if ('limits' in config and 'todo' in config['limits'] and
                int(config['limits']['todo']) <= len(todos)):
            click.echo('No new todos, limit reached already.')
        else:
            # New id = highest existing id + 1 (keys sorted, take the last).
            od = collections.OrderedDict(sorted(dd['data'].items()))
            new_id = 1
            if bool(od):
                new_id = next(reversed(od)) + 1
            # Entry layout: [status, text, last-changed ts, created ts].
            entry = ['todo', task, timestamp(), timestamp()]
            dd['data'].update({new_id: entry})
            click.echo("Creating new task w/ id: %d -> %s" % (new_id, task))
            write_data(config, dd)
@clikan.command()
@click.argument('id')
def delete(id):
    """Delete task"""
    config = read_config_yaml()
    dd = read_data(config)
    item = dd['data'].get(int(id))
    if item is None:
        click.echo('No existing task with that id.')
    else:
        # Move the entry into the 'deleted' archive instead of dropping it.
        item[0] = 'deleted'
        item[2] = timestamp()
        # NOTE(review): assumes dd always has a 'deleted' key; read_data only
        # guarantees that for files it initialized itself -- confirm for old data.
        dd['deleted'].update({int(id): item})
        dd['data'].pop(int(id))
        write_data(config, dd)
        click.echo('Removed task %d.' % int(id))
@clikan.command()
@click.argument('id')
def promote(id):
    """Promote task: todo -> in-progress -> done."""
    config = read_config_yaml()
    dd = read_data(config)
    todos, inprogs, dones = split_items(config, dd)
    item = dd['data'].get(int(id))
    if item is None:
        # Mirror `delete`: an unknown id used to crash with a TypeError here.
        click.echo('No existing task with that id.')
    elif item[0] == 'todo':
        # Optional WIP cap via config['limits']['wip'].
        if ('limits' in config and 'wip' in config['limits'] and
                int(config['limits']['wip']) <= len(inprogs)):
            click.echo('No new tasks, limit reached already.')
        else:
            click.echo('Promoting task %s to in-progress.' % id)
            # Entry layout: [status, text, last-changed ts, created ts].
            dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
            write_data(config, dd)
    elif item[0] == 'inprogress':
        click.echo('Promoting task %s to done.' % id)
        dd['data'][int(id)] = ['done', item[1], timestamp(), item[3]]
        write_data(config, dd)
    else:
        click.echo('Already done, can not promote %s' % id)
@clikan.command()
@click.argument('id')
def regress(id):
    """Regress task: done -> in-progress -> todo."""
    config = read_config_yaml()
    dd = read_data(config)
    item = dd['data'].get(int(id))
    if item is None:
        # Mirror `delete`: an unknown id used to crash with a TypeError here.
        click.echo('No existing task with that id.')
    elif item[0] == 'done':
        click.echo('Regressing task %s to in-progress.' % id)
        # Entry layout: [status, text, last-changed ts, created ts].
        dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]]
        write_data(config, dd)
    elif item[0] == 'inprogress':
        click.echo('Regressing task %s to todo.' % id)
        dd['data'][int(id)] = ['todo', item[1], timestamp(), item[3]]
        write_data(config, dd)
    else:
        click.echo('Already in todo, can not regress %s' % id)
@clikan.command()
def show():
    console = Console()
    # NOTE(review): this string is placed after the first statement, so it is
    # not actually the command's docstring.
    """Show tasks in clikan"""
    config = read_config_yaml()
    dd = read_data(config)
    todos, inprogs, dones = split_items(config, dd)
    # Cap the done column (config['limits']['done'], default 10 entries).
    if 'limits' in config and 'done' in config['limits']:
        dones = dones[0:int(config['limits']['done'])]
    else:
        dones = dones[0:10]
    # One newline-joined cell per column.
    todos = '\n'.join([str(x) for x in todos])
    inprogs = '\n'.join([str(x) for x in inprogs])
    dones = '\n'.join([str(x) for x in dones])
    # Render a three-column board with rich; footer carries name/version.
    table = Table(show_header=True, show_footer=True)
    table.add_column("[bold yellow]todo[/bold yellow]", no_wrap=True, footer="clikan")
    table.add_column('[bold green]in-progress[/bold green]', no_wrap=True)
    table.add_column('[bold magenta]done[/bold magenta]', no_wrap=True, footer="v.{}".format(VERSION))
    table.add_row(todos, inprogs, dones)
    console.print(table)
def read_data(config):
"""Read the existing data from the config datasource"""
try:
with open(config["clikan_data"], 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print("Ensure %s exists, as you specified it "
"as the clikan data file." % config['clikan_data'])
print(exc)
except IOError:
click.echo("No data, initializing data file.")
write_data(config, {"data": {}, "deleted": {}})
with open(config["clikan_data"], 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
def write_data(config, data):
"""Write the data to the config datasource"""
with open(config["clikan_data"], 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def get_clikan_home():
home = os.environ.get('CLIKAN_HOME')
if not home:
home = os.path.expanduser('~')
return home
def read_config_yaml():
"""Read the app config from ~/.clikan.yaml"""
try:
home = get_clikan_home()
with open(home + "/.clikan.yaml", 'r') as stream:
try:
return yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError:
print("Ensure %s/.clikan.yaml is valid, expected YAML." % home)
sys.exit()
except IOError:
print("Ensure %s/.clikan.yaml exists and is valid." % home)
sys.exit()
def split_items(config, dd):
todos = []
inprogs = []
dones = []
for key, value in dd['data'].items():
if value[0] == 'todo':
todos.append("[%d] %s" % (key, value[1]))
elif value[0] == 'inprogress':
inprogs.append("[%d] %s" % (key, value[1]))
else:
dones.insert(0, "[%d] %s" % (key, value[1]))
return todos, inprogs, dones
def timestamp():
return '{:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now())
| [((824, 870), 'click.make_pass_decorator', 'click.make_pass_decorator', (['Config'], {'ensure': '(True)'}), '(Config, ensure=True)\n', (849, 870), False, 'import click\n'), ((2635, 2664), 'click.version_option', 'click.version_option', (['VERSION'], {}), '(VERSION)\n', (2655, 2664), False, 'import click\n'), ((2666, 2738), 'click.command', 'click.command', ([], {'cls': 'AliasedGroup', 'default': '"""show"""', 'default_if_no_args': '(True)'}), "(cls=AliasedGroup, default='show', default_if_no_args=True)\n", (2679, 2738), False, 'import click\n'), ((3366, 3388), 'click.argument', 'click.argument', (['"""task"""'], {}), "('task')\n", (3380, 3388), False, 'import click\n'), ((4276, 4296), 'click.argument', 'click.argument', (['"""id"""'], {}), "('id')\n", (4290, 4296), False, 'import click\n'), ((4750, 4770), 'click.argument', 'click.argument', (['"""id"""'], {}), "('id')\n", (4764, 4770), False, 'import click\n'), ((5649, 5669), 'click.argument', 'click.argument', (['"""id"""'], {}), "('id')\n", (5663, 5669), False, 'import click\n'), ((2933, 2966), 'os.path.join', 'os.path.join', (['home', '""".clikan.dat"""'], {}), "(home, '.clikan.dat')\n", (2945, 2966), False, 'import os\n'), ((2985, 3019), 'os.path.join', 'os.path.join', (['home', '""".clikan.yaml"""'], {}), "(home, '.clikan.yaml')\n", (2997, 3019), False, 'import os\n'), ((3305, 3344), 'click.echo', 'click.echo', (["('Creating %s' % config_path)"], {}), "('Creating %s' % config_path)\n", (3315, 3344), False, 'import click\n'), ((6308, 6317), 'rich.console.Console', 'Console', ([], {}), '()\n', (6315, 6317), False, 'from rich.console import Console\n'), ((7100, 7141), 'rich.table.Table', 'Table', ([], {'show_header': '(True)', 'show_footer': '(True)'}), '(show_header=True, show_footer=True)\n', (7105, 7141), False, 'from rich.table import Table\n'), ((8928, 8957), 'os.environ.get', 'os.environ.get', (['"""CLIKAN_HOME"""'], {}), "('CLIKAN_HOME')\n", (8942, 8957), False, 'import os\n'), ((352, 383), 
'pkg_resources.require', 'pkg_resources.require', (['"""clikan"""'], {}), "('clikan')\n", (373, 383), False, 'import pkg_resources\n'), ((521, 532), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (530, 532), False, 'import os\n'), ((614, 644), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (642, 644), False, 'import configparser\n'), ((1124, 1168), 'click.Group.get_command', 'click.Group.get_command', (['self', 'ctx', 'cmd_name'], {}), '(self, ctx, cmd_name)\n', (1147, 1168), False, 'import click\n'), ((3028, 3055), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (3042, 3055), False, 'import os\n'), ((3250, 3300), 'yaml.dump', 'yaml.dump', (['conf', 'outfile'], {'default_flow_style': '(False)'}), '(conf, outfile, default_flow_style=False)\n', (3259, 3300), False, 'import yaml\n'), ((3464, 3529), 'click.echo', 'click.echo', (['"""Task must be shorter than 40 chars. Brevity counts."""'], {}), "('Task must be shorter than 40 chars. Brevity counts.')\n", (3474, 3529), False, 'import click\n'), ((4458, 4502), 'click.echo', 'click.echo', (['"""No existing task with that id."""'], {}), "('No existing task with that id.')\n", (4468, 4502), False, 'import click\n'), ((5838, 5891), 'click.echo', 'click.echo', (["('Regressing task %s to in-progress.' % id)"], {}), "('Regressing task %s to in-progress.' 
% id)\n", (5848, 5891), False, 'import click\n'), ((8841, 8891), 'yaml.dump', 'yaml.dump', (['data', 'outfile'], {'default_flow_style': '(False)'}), '(data, outfile, default_flow_style=False)\n', (8850, 8891), False, 'import yaml\n'), ((8990, 9013), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (9008, 9013), False, 'import os\n'), ((10003, 10026), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10024, 10026), False, 'import datetime\n'), ((1556, 1602), 'click.Group.get_command', 'click.Group.get_command', (['self', 'ctx', 'actual_cmd'], {}), '(self, ctx, actual_cmd)\n', (1579, 1602), False, 'import click\n'), ((2546, 2571), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2561, 2571), False, 'import os\n'), ((3076, 3138), 'click.confirm', 'click.confirm', (['"""Config file exists. Do you want to overwrite?"""'], {}), "('Config file exists. Do you want to overwrite?')\n", (3089, 3138), False, 'import click\n'), ((3804, 3854), 'click.echo', 'click.echo', (['"""No new todos, limit reached already."""'], {}), "('No new todos, limit reached already.')\n", (3814, 3854), False, 'import click\n'), ((4155, 4219), 'click.echo', 'click.echo', (["('Creating new task w/ id: %d -> %s' % (new_id, task))"], {}), "('Creating new task w/ id: %d -> %s' % (new_id, task))\n", (4165, 4219), False, 'import click\n'), ((5124, 5174), 'click.echo', 'click.echo', (['"""No new tasks, limit reached already."""'], {}), "('No new tasks, limit reached already.')\n", (5134, 5174), False, 'import click\n'), ((5201, 5253), 'click.echo', 'click.echo', (["('Promoting task %s to in-progress.' % id)"], {}), "('Promoting task %s to in-progress.' % id)\n", (5211, 5253), False, 'import click\n'), ((5411, 5456), 'click.echo', 'click.echo', (["('Promoting task %s to done.' % id)"], {}), "('Promoting task %s to done.' 
% id)\n", (5421, 5456), False, 'import click\n'), ((5576, 5627), 'click.echo', 'click.echo', (["('Already done, can not promote %s' % id)"], {}), "('Already done, can not promote %s' % id)\n", (5586, 5627), False, 'import click\n'), ((6041, 6087), 'click.echo', 'click.echo', (["('Regressing task %s to todo.' % id)"], {}), "('Regressing task %s to todo.' % id)\n", (6051, 6087), False, 'import click\n'), ((6207, 6261), 'click.echo', 'click.echo', (["('Already in todo, can not regress %s' % id)"], {}), "('Already in todo, can not regress %s' % id)\n", (6217, 6261), False, 'import click\n'), ((8476, 8522), 'click.echo', 'click.echo', (['"""No data, initializing data file."""'], {}), "('No data, initializing data file.')\n", (8486, 8522), False, 'import click\n'), ((9458, 9517), 'rich.print', 'print', (["('Ensure %s/.clikan.yaml exists and is valid.' % home)"], {}), "('Ensure %s/.clikan.yaml exists and is valid.' % home)\n", (9463, 9517), False, 'from rich import print\n'), ((9526, 9536), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9534, 9536), False, 'import sys\n'), ((2062, 2108), 'click.Group.get_command', 'click.Group.get_command', (['self', 'ctx', 'matches[0]'], {}), '(self, ctx, matches[0])\n', (2085, 2108), False, 'import click\n'), ((8200, 8241), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (8209, 8241), False, 'import yaml\n'), ((8655, 8696), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (8664, 8696), False, 'import yaml\n'), ((9246, 9287), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (9255, 9287), False, 'import yaml\n'), ((8300, 8399), 'rich.print', 'print', (["('Ensure %s exists, as you specified it as the clikan data file.' % config[\n 'clikan_data'])"], {}), "('Ensure %s exists, as you specified it as the clikan data file.' 
%\n config['clikan_data'])\n", (8305, 8399), False, 'from rich import print\n'), ((8437, 8447), 'rich.print', 'print', (['exc'], {}), '(exc)\n', (8442, 8447), False, 'from rich import print\n'), ((9339, 9402), 'rich.print', 'print', (["('Ensure %s/.clikan.yaml is valid, expected YAML.' % home)"], {}), "('Ensure %s/.clikan.yaml is valid, expected YAML.' % home)\n", (9344, 9402), False, 'from rich import print\n'), ((9419, 9429), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9427, 9429), False, 'import sys\n')] |
RagtagOpen/python-social-auth-ragtag-id | social_auth_ragtag_id/backends.py | 8d8e005231c09535098136213347934e9da7b3f2 | from social_core.backends.oauth import BaseOAuth2
class RagtagOAuth2(BaseOAuth2):
"""Ragtag ID OAuth authentication backend"""
name = "ragtag"
AUTHORIZATION_URL = "https://id.ragtag.org/oauth/authorize/"
ACCESS_TOKEN_URL = "https://id.ragtag.org/oauth/token/"
ACCESS_TOKEN_METHOD = "POST"
REVOKE_TOKEN_URL = "https://id.ragtag.org/oauth/revoke_token/"
SCOPE_SEPARATOR = " "
ID_KEY = "id"
def get_user_details(self, response):
"""Return user details from Ragtag ID account"""
return {
"username": response.get("username"),
"email": response.get("email"),
"first_name": response.get("first_name"),
"last_name": response.get("last_name"),
}
def user_data(self, access_token, *args, **kwargs):
"""Fetches user data from id.ragtag.org"""
return self.get_json(
"https://id.ragtag.org/api/me/",
headers={"Authorization": "Bearer {}".format(access_token)},
)
def auth_params(self, state=None):
params = super(RagtagOAuth2, self).auth_params(state=state)
approval_prompt = self.setting("APPROVAL_PROMPT", "auto")
if not approval_prompt == "auto":
params["approval_prompt"] = self.setting("APPROVAL_PROMPT", "")
return params
| [] |
angeelgarr/DCPanel | panel/api/models/provider.py | 1901a0f4b1b4273b60d3a218797fb6614d05b4c0 | from django.db import models
from django.contrib import admin
class Provider(models.Model):
name = models.CharField(max_length=50)
domain = models.CharField(max_length=50)
class Meta:
ordering = ['name']
app_label = 'api'
def __str__(self):
return self.domain
@admin.register(Provider)
class ProviderAdmin(admin.ModelAdmin):
list_display = ('name', 'domain')
| [((307, 331), 'django.contrib.admin.register', 'admin.register', (['Provider'], {}), '(Provider)\n', (321, 331), False, 'from django.contrib import admin\n'), ((105, 136), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (121, 136), False, 'from django.db import models\n'), ((150, 181), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (166, 181), False, 'from django.db import models\n')] |
siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling | trial/src/sender.py | 4e8d991d55ae7da91b3c90773c679f3369a4dafa | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from gazebo_msgs.msg import LinkState
def talker():
pub = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=10)
ppp = LinkState()
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(100) # 10hz
i = 1
while not rospy.is_shutdown():
ppp.link_name = "platform"
ppp.pose.position.x = 0.1
ppp.pose.position.y = 0.1
ppp.pose.position.z = 1
ppp.pose.orientation.x = 0
ppp.pose.orientation.y = 0
ppp.pose.orientation.z = 0
ppp.pose.orientation.w = 0
i = i+1
rospy.loginfo(ppp)
pub.publish(ppp)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| [((161, 228), 'rospy.Publisher', 'rospy.Publisher', (['"""/gazebo/set_link_state"""', 'LinkState'], {'queue_size': '(10)'}), "('/gazebo/set_link_state', LinkState, queue_size=10)\n", (176, 228), False, 'import rospy\n'), ((239, 250), 'gazebo_msgs.msg.LinkState', 'LinkState', ([], {}), '()\n', (248, 250), False, 'from gazebo_msgs.msg import LinkState\n'), ((255, 296), 'rospy.init_node', 'rospy.init_node', (['"""talker"""'], {'anonymous': '(True)'}), "('talker', anonymous=True)\n", (270, 296), False, 'import rospy\n'), ((313, 328), 'rospy.Rate', 'rospy.Rate', (['(100)'], {}), '(100)\n', (323, 328), False, 'import rospy\n'), ((360, 379), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (377, 379), False, 'import rospy\n'), ((680, 698), 'rospy.loginfo', 'rospy.loginfo', (['ppp'], {}), '(ppp)\n', (693, 698), False, 'import rospy\n')] |
kimmokal/CC-Art-Critics | discriminator_dataset.py | af83762a5f22043f279c167cbd58e16737e3ec87 | import torch
from os import listdir, path
from PIL import Image
import torchvision
class DiscriminatorDataset(torch.utils.data.Dataset):
def __init__(self):
super(DiscriminatorDataset, self).__init__()
currentDir = path.dirname(__file__)
abstractDir = path.join(currentDir, 'image_data/abstract')
realisticDir = path.join(currentDir, 'image_data/realistic')
abstractFiles = [path.join(abstractDir, f) for f in listdir(
abstractDir) if path.isfile(path.join(abstractDir, f))]
realisticFiles = [path.join(realisticDir, f) for f in listdir(
realisticDir) if path.isfile(path.join(realisticDir, f))]
self.abstractFilesLen = len(abstractFiles)
self.allFiles = abstractFiles + realisticFiles
def __len__(self):
return len(self.allFiles)
def __getitem__(self, index):
filename = self.allFiles[index]
pilImage = Image.open(filename).convert("RGB")
return (torchvision.transforms.ToTensor()(pilImage), 1 if index < self.abstractFilesLen else 0)
| [((238, 260), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (250, 260), False, 'from os import listdir, path\n'), ((283, 327), 'os.path.join', 'path.join', (['currentDir', '"""image_data/abstract"""'], {}), "(currentDir, 'image_data/abstract')\n", (292, 327), False, 'from os import listdir, path\n'), ((351, 396), 'os.path.join', 'path.join', (['currentDir', '"""image_data/realistic"""'], {}), "(currentDir, 'image_data/realistic')\n", (360, 396), False, 'from os import listdir, path\n'), ((422, 447), 'os.path.join', 'path.join', (['abstractDir', 'f'], {}), '(abstractDir, f)\n', (431, 447), False, 'from os import listdir, path\n'), ((560, 586), 'os.path.join', 'path.join', (['realisticDir', 'f'], {}), '(realisticDir, f)\n', (569, 586), False, 'from os import listdir, path\n'), ((457, 477), 'os.listdir', 'listdir', (['abstractDir'], {}), '(abstractDir)\n', (464, 477), False, 'from os import listdir, path\n'), ((596, 617), 'os.listdir', 'listdir', (['realisticDir'], {}), '(realisticDir)\n', (603, 617), False, 'from os import listdir, path\n'), ((933, 953), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (943, 953), False, 'from PIL import Image\n'), ((985, 1018), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1016, 1018), False, 'import torchvision\n'), ((506, 531), 'os.path.join', 'path.join', (['abstractDir', 'f'], {}), '(abstractDir, f)\n', (515, 531), False, 'from os import listdir, path\n'), ((646, 672), 'os.path.join', 'path.join', (['realisticDir', 'f'], {}), '(realisticDir, f)\n', (655, 672), False, 'from os import listdir, path\n')] |
ionata/django-emailmeld | emailmeld/sender.py | 28326933d22957f8737ab8a9564daa9cbfca6d06 | from django.core.mail.message import EmailMessage, EmailMultiAlternatives
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
def send_mail_task(subject, message, from_email, recipient_list):
message = EmailMessage("Discover Special Value - {0}".format(subject), message, from_email, recipient_list)
message.send()
def send_html_mail_task(subject, text_message, html_message, from_email, recipient_list, template='email/email_base.html'):
if template is not None:
html_message = render_to_string(template, {'content': mark_safe(html_message)}) # render html into an email template
message = EmailMultiAlternatives("Discover Special Value - {0}".format(subject), html_message, from_email, recipient_list)
message.content_subtype = "html"
message.attach_alternative(text_message, "text/plain")
message.send()
| [((644, 667), 'django.utils.safestring.mark_safe', 'mark_safe', (['html_message'], {}), '(html_message)\n', (653, 667), False, 'from django.utils.safestring import mark_safe\n')] |
sander-vd/HAP-python | tests/test_hap_server.py | 991761ceadfd7796d454d61c87be7f5d4b75d432 | """Tests for the HAPServer."""
from socket import timeout
from unittest.mock import Mock, MagicMock, patch
import pytest
from pyhap import hap_server
@patch('pyhap.hap_server.HAPServer.server_bind', new=MagicMock())
@patch('pyhap.hap_server.HAPServer.server_activate', new=MagicMock())
def test_finish_request_pops_socket():
"""Test that ``finish_request`` always clears the connection after a request."""
amock = Mock()
client_addr = ('192.168.1.1', 55555)
server_addr = ('', 51826)
# Positive case: The request is handled
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: MagicMock())
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request fails with a timeout
def raises(*args):
raise timeout()
server = hap_server.HAPServer(server_addr, amock,
handler_type=raises)
server.connections[client_addr] = amock
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
# Negative case: The request raises some other exception
server = hap_server.HAPServer(server_addr, amock,
handler_type=lambda *args: 1 / 0)
server.connections[client_addr] = amock
with pytest.raises(Exception):
server.finish_request(amock, client_addr)
assert len(server.connections) == 0
| [((426, 432), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (430, 432), False, 'from unittest.mock import Mock, MagicMock, patch\n'), ((924, 985), 'pyhap.hap_server.HAPServer', 'hap_server.HAPServer', (['server_addr', 'amock'], {'handler_type': 'raises'}), '(server_addr, amock, handler_type=raises)\n', (944, 985), False, 'from pyhap import hap_server\n'), ((1226, 1300), 'pyhap.hap_server.HAPServer', 'hap_server.HAPServer', (['server_addr', 'amock'], {'handler_type': '(lambda *args: 1 / 0)'}), '(server_addr, amock, handler_type=lambda *args: 1 / 0)\n', (1246, 1300), False, 'from pyhap import hap_server\n'), ((901, 910), 'socket.timeout', 'timeout', ([], {}), '()\n', (908, 910), False, 'from socket import timeout\n'), ((1389, 1413), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1402, 1413), False, 'import pytest\n'), ((207, 218), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (216, 218), False, 'from unittest.mock import Mock, MagicMock, patch\n'), ((277, 288), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (286, 288), False, 'from unittest.mock import Mock, MagicMock, patch\n'), ((664, 675), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (673, 675), False, 'from unittest.mock import Mock, MagicMock, patch\n')] |
charlesashby/marketvault-front-end | app/views/main.py | 758cf8ba1d8486f45eac093ded78a15fc82df3dc | from flask import render_template, Blueprint, request
from app.utils.search import MySQLClient
from app.utils.preprocessor import TextPreprocessor
mainbp = Blueprint("main", __name__)
@mainbp.route("/search", methods=["GET"])
@mainbp.route("/", methods=["GET"])
def home():
stores_by_page = 10
topic = request.args.get("topic")
category = request.args.get("category")
daily_visitors = request.args.get("dailyvisitors")
alexa_rank = request.args.get("alexarank")
page = request.args.get("page") or 0
if all([topic is None, category is None, daily_visitors is None, alexa_rank is None]):
stores = MySQLClient.random_stores(page * stores_by_page, stores_by_page)
else:
stores = MySQLClient.search_stores(category, daily_visitors, alexa_rank, topic, page * stores_by_page, stores_by_page)
stores = [
{
"url": store.url,
"description": TextPreprocessor.clean_str(store.description),
"title": TextPreprocessor.clean_str(store.title),
"alexa_rank": store.alexa_rank,
"category": store.category,
"average_product_price": store.average_product_price,
"daily_visitors": store.daily_visitors
} for store in stores
]
return render_template("search/index.html", stores=stores)
@mainbp.route("/search/topics", methods=["GET"])
def search_topics():
substring = request.args.get("q")
return [
{
"id": topic.id,
"text": topic.text
} for topic in MySQLClient.search_topic_by_substring(substring)
]
| [((159, 186), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (168, 186), False, 'from flask import render_template, Blueprint, request\n'), ((315, 340), 'flask.request.args.get', 'request.args.get', (['"""topic"""'], {}), "('topic')\n", (331, 340), False, 'from flask import render_template, Blueprint, request\n'), ((356, 384), 'flask.request.args.get', 'request.args.get', (['"""category"""'], {}), "('category')\n", (372, 384), False, 'from flask import render_template, Blueprint, request\n'), ((406, 439), 'flask.request.args.get', 'request.args.get', (['"""dailyvisitors"""'], {}), "('dailyvisitors')\n", (422, 439), False, 'from flask import render_template, Blueprint, request\n'), ((457, 486), 'flask.request.args.get', 'request.args.get', (['"""alexarank"""'], {}), "('alexarank')\n", (473, 486), False, 'from flask import render_template, Blueprint, request\n'), ((1280, 1331), 'flask.render_template', 'render_template', (['"""search/index.html"""'], {'stores': 'stores'}), "('search/index.html', stores=stores)\n", (1295, 1331), False, 'from flask import render_template, Blueprint, request\n'), ((1420, 1441), 'flask.request.args.get', 'request.args.get', (['"""q"""'], {}), "('q')\n", (1436, 1441), False, 'from flask import render_template, Blueprint, request\n'), ((498, 522), 'flask.request.args.get', 'request.args.get', (['"""page"""'], {}), "('page')\n", (514, 522), False, 'from flask import render_template, Blueprint, request\n'), ((637, 701), 'app.utils.search.MySQLClient.random_stores', 'MySQLClient.random_stores', (['(page * stores_by_page)', 'stores_by_page'], {}), '(page * stores_by_page, stores_by_page)\n', (662, 701), False, 'from app.utils.search import MySQLClient\n'), ((729, 842), 'app.utils.search.MySQLClient.search_stores', 'MySQLClient.search_stores', (['category', 'daily_visitors', 'alexa_rank', 'topic', '(page * stores_by_page)', 'stores_by_page'], {}), '(category, daily_visitors, alexa_rank, topic, page 
*\n stores_by_page, stores_by_page)\n', (754, 842), False, 'from app.utils.search import MySQLClient\n'), ((922, 967), 'app.utils.preprocessor.TextPreprocessor.clean_str', 'TextPreprocessor.clean_str', (['store.description'], {}), '(store.description)\n', (948, 967), False, 'from app.utils.preprocessor import TextPreprocessor\n'), ((990, 1029), 'app.utils.preprocessor.TextPreprocessor.clean_str', 'TextPreprocessor.clean_str', (['store.title'], {}), '(store.title)\n', (1016, 1029), False, 'from app.utils.preprocessor import TextPreprocessor\n'), ((1547, 1595), 'app.utils.search.MySQLClient.search_topic_by_substring', 'MySQLClient.search_topic_by_substring', (['substring'], {}), '(substring)\n', (1584, 1595), False, 'from app.utils.search import MySQLClient\n')] |
tinapiao/Software-IC-Automation | bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | # -*- coding: utf-8 -*-
"""This module contains design algorithm for a traditional two stage operational amplifier."""
from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Sequence
from copy import deepcopy
import numpy as np
import scipy.optimize as sciopt
from bag.math import gcd
from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db
from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden
from bag.simulation.core import MeasurementManager
from verification.mos.query import MOSDBDiscrete
from .components import LoadDiodePFB, InputGm
if TYPE_CHECKING:
from verification.ac.core import ACTB
class TailStage1(object):
"""Tail transistor of the first stage op amp.
Due to layout restrictions, the tail transistor needs to have the same number of fingers
and stack number as the input transistor. This method finds the optimal width/intent.
"""
def __init__(self, mos_db):
# type: (MOSDBDiscrete) -> None
self._db = mos_db
self._intent_list = mos_db.get_dsn_param_values('intent')
self._valid_widths = mos_db.width_list
self._best_op = None
def design(self,
itarg_list, # type: List[float]
vd_list, # type: List[float]
vout_amp_list, # type: List[float]
vb, # type: float
l, # type: float
seg, # type: int
stack, # type: int
):
# type: (...) -> None
vgs_idx = self._db.get_fun_arg_index('vgs')
self._best_op = best_score = None
for intent in self._intent_list:
for w in self._valid_widths:
self._db.set_dsn_params(l=l, w=w, intent=intent, stack=stack)
ib = self._db.get_function_list('ibias')
gds = self._db.get_function_list('gds')
vgs_min, vgs_max = ib[0].get_input_range(vgs_idx)
vg_min = vgs_min + vb
vg_max = vgs_max + vb
# find vgs for each corner
vgs_list, gds1_list, gds2_list = self._solve_vgs(itarg_list, vout_amp_list, vd_list,
ib, gds, seg, vb, vg_min, vg_max)
if vgs_list is not None:
cur_score = max(gds2_list)
if self._best_op is None or cur_score < best_score:
best_score = cur_score
self._best_op = (w, intent, seg, stack, vb, vgs_list, vout_amp_list,
gds1_list, gds2_list)
def _solve_vgs(self, itarg_list, vout_list, vd_list, ib_list, gds_list, seg, vb, vg_min,
vg_max):
vgs_list, gds1_list, gds2_list = [], [], []
for itarg, vout, vd, ibf, gdsf in zip(itarg_list, vout_list, vd_list, ib_list, gds_list):
def zero_fun(vg):
farg = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vg - vb)
return seg * ibf(farg) - itarg
v1, v2 = zero_fun(vg_min), zero_fun(vg_max)
if v1 < 0 and v2 < 0 or v1 > 0 and v2 > 0:
# no solution
return None, None, None
vg_sol = sciopt.brentq(zero_fun, vg_min, vg_max) # type: float
vgs_opt = vg_sol - vb
arg1 = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vgs_opt)
arg2 = self._db.get_fun_arg(vbs=vb - vd, vds=vout - vb, vgs=vgs_opt)
vgs_list.append(vgs_opt)
gds1_list.append(seg * gdsf(arg1))
gds2_list.append(seg * gdsf(arg2))
return vgs_list, gds1_list, gds2_list
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
if self._best_op is None:
return None
w, intent, seg, stack, vb, vgs_list, vout_list, gds1_list, gds2_list = self._best_op
self._db.set_dsn_params(w=w, intent=intent, stack=stack)
cdd = self._db.get_function_list('cdd')
cdd2_list = []
for vgs, vout, cddf in zip(vgs_list, vout_list, cdd):
arg = self._db.get_fun_arg(vbs=0, vds=vout - vb, vgs=vgs)
cur_cdd = cddf(arg) # type: float
cdd2_list.append(seg * cur_cdd)
return dict(
w=w,
intent=intent,
vgs=vgs_list,
gds1=gds1_list,
gds2=gds2_list,
cdd2=cdd2_list,
)
class StageOneCurrentError(Exception):
pass
class OpAmpTwoStage(object):
"""A two stage fully differential operational amplifier.
The first stage is a differential amplifier with diode + positive feedback load, the
second stage is a psuedo-differential common source amplifier.
This topology has the following advantages:
1. large output swing.
2. Common mode feedback is only required for the second stage.
"""
def __init__(self, nch_db, pch_db):
# type: (MOSDBDiscrete, MOSDBDiscrete) -> None
self._nch_db = nch_db
self._pch_db = pch_db
self._amp_info = None
def design(self,
i1_unit, # type: List[float]
i1_min_size, # type: int
vg_list, # type: List[float]
vout_list, # type: List[float]
cpar1, # type: float
cload, # type: float
f_unit, # type: float
phase_margin, # type: float
res_var, # type: float
l, # type: float
vstar_gm_min, # type: float
ft_load_scale, # type: float
vds_tail_min, # type: float
seg_gm_min, # type: int
vdd, # type: float
pmos_input=True, # type: bool
max_ref_ratio=20, # type: int
load_stack_list=None, # type: Optional[List[int]]
):
# type: (...) -> None
# binary search for minimum stage 1 current,
i1_size_iter = BinaryIterator(i1_min_size, None)
i1_size_opt, opt_info = None, None
while i1_size_iter.has_next():
i1_size = i1_size_iter.get_next()
print('trying i1_size = %d' % i1_size)
try:
self._design_with_itarg(i1_size, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
success = True
except StageOneCurrentError as err:
print(err)
success = False
if success:
print('success')
opt_info = self._amp_info
i1_size_opt = i1_size
i1_size_iter.down()
else:
i1_size_iter.up()
# linear search to find optimal scale2
scale2_int_max = int(opt_info['scale2'])
if scale2_int_max == opt_info['scale2']:
scale2_int_max -= 1
last_i1_size = i1_size_opt
print('i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
for scale2_test in range(scale2_int_max, 0, -1):
i1_size_test = int(np.floor(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test)))
if i1_size_test <= last_i1_size or scale2_test == opt_info['scale2']:
continue
print('testing i1_size = %d, scale2 = %.4g' % (i1_size_test, scale2_test))
try:
self._design_with_itarg(i1_size_test, i1_unit, vg_list, vout_list, cpar1, cload,
f_unit, phase_margin, res_var, l, vstar_gm_min,
ft_load_scale, vds_tail_min, seg_gm_min,
vdd, pmos_input, max_ref_ratio, load_stack_list)
except StageOneCurrentError as err:
print(err)
continue
if self._amp_info['scale2'] <= scale2_test:
# found new minimum. close in to find optimal i1 size
opt_info = self._amp_info
i1_size_opt = i1_size_test
print('update: i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter = BinaryIterator(last_i1_size + 1, i1_size_test)
while i1_size_iter.has_next():
i1_size_cur_opt = i1_size_iter.get_next()
print('testing i1_size = %d' % i1_size_cur_opt)
try:
self._design_with_itarg(i1_size_cur_opt, i1_unit, vg_list, vout_list, cpar1,
cload, f_unit, phase_margin, res_var, l,
vstar_gm_min, ft_load_scale, vds_tail_min,
seg_gm_min, vdd, pmos_input, max_ref_ratio,
load_stack_list)
if self._amp_info['scale2'] <= opt_info['scale2']:
opt_info = self._amp_info
i1_size_opt = i1_size_cur_opt
print('update: i1_size = %d, '
'scale2 = %.4g' % (i1_size_opt, opt_info['scale2']))
i1_size_iter.down()
else:
i1_size_iter.up()
except StageOneCurrentError as err:
print(err)
i1_size_iter.up()
last_i1_size = i1_size_test
self._amp_info = opt_info
    def _design_with_itarg(self,
                           i1_size,  # type: int
                           i1_unit,  # type: List[float]
                           vg_list,  # type: List[float]
                           vout_list,  # type: List[float]
                           cpar1,  # type: float
                           cload,  # type: float
                           f_unit,  # type: float
                           phase_margin,  # type: float
                           res_var,  # type: float
                           l,  # type: float
                           vstar_gm_min,  # type: float
                           ft_load_scale,  # type: float
                           vds_tail_min,  # type: float
                           seg_gm_min,  # type: int
                           vdd,  # type: float
                           pmos_input,  # type: bool
                           max_ref_ratio,  # type: int
                           load_stack_list,  # type: Optional[List[int]]
                           ):
        # type: (...) -> None
        """Design the two-stage amplifier for a given stage-1 current size.

        Sizes the stage-1 load, input Gm pair, and tail for the target bias
        current ``i1_size * i1_unit`` (per corner), then delegates stage-2
        sizing and compensation to ``_design_stage2``.  On success the
        complete design summary is stored in ``self._amp_info``.  A
        ``StageOneCurrentError`` raised by ``_design_stage2`` (when the
        stage-1 current is insufficient) propagates to the caller.
        """
        # per-corner stage-1 target currents
        itarg_list = [i1 * i1_size for i1 in i1_unit]
        # select transistor databases and supply-referenced voltages based on
        # input-pair polarity.
        if pmos_input:
            load_db = self._nch_db
            gm_db = self._pch_db
            vds2_list = vout_list
            vb_gm = vdd
            vb_load = 0
        else:
            load_db = self._pch_db
            gm_db = self._nch_db
            vds2_list = [vo - vdd for vo in vout_list]
            vb_gm = 0
            vb_load = vdd
        load = LoadDiodePFB(load_db)
        gm = InputGm(gm_db)
        tail1 = TailStage1(gm_db)
        # design load; its ft requirement is scaled from the unity-gain target
        print('designing load')
        load.design(itarg_list, vds2_list, ft_load_scale * f_unit, stack_list=load_stack_list)
        load_info = load.get_dsn_info()
        vgs_load_list = load_info['vgs']
        gds_load_list = load_info['gds1']
        gm2_list = load_info['gm2']
        stack_diode = load_info['stack_diode']
        stack_ngm = load_info['stack_ngm']
        seg_diode = load_info['seg_diode']
        seg_ngm = load_info['seg_ngm']
        # stage-1 output common mode follows from the load's vgs
        if pmos_input:
            vmid_list = vgs_load_list
        else:
            vmid_list = [vdd - vgs for vgs in vgs_load_list]
        # design input gm
        print('designing input gm')
        gm.design(itarg_list, vg_list, vmid_list, gds_load_list, vb_gm, vstar_gm_min, vds_tail_min,
                  seg_min=seg_gm_min, stack_list=[stack_ngm])
        gm_info = gm.get_dsn_info()
        gm1_list = gm_info['gm']
        gds_in_list = gm_info['gds']
        vtail_list = gm_info['vs']
        seg_gm = gm_info['seg']
        stack_gm = gm_info['stack']
        # stage-1 output conductance and DC gain per corner
        gds1_list = [gds_in + gds_load for gds_in, gds_load in zip(gds_in_list, gds_load_list)]
        gain1_list = [gm1 / gds1 for gm1, gds1 in zip(gm1_list, gds1_list)]
        # design stage 1 tail
        print('designing tail')
        tail1.design(itarg_list, vtail_list, vout_list, vb_gm, l, seg_gm, stack_gm)
        tail1_info = tail1.get_dsn_info()
        vbias_list = [vgs_tail + vb_gm for vgs_tail in tail1_info['vgs']]
        # design stage 2 gm
        # collect per-device width/threshold/stack/segment tables consumed by
        # stage-2 design and later by the layout generator.
        w_dict = {'load': load_info['w'], 'in': gm_info['w'], 'tail': tail1_info['w']}
        th_dict = {'load': load_info['intent'], 'in': gm_info['intent'],
                   'tail': tail1_info['intent']}
        stack_dict = {'tail': stack_gm, 'in': stack_gm, 'diode': stack_diode, 'ngm': stack_ngm}
        seg_dict = {'tail1': seg_gm,
                    'in': seg_gm,
                    'diode1': seg_diode,
                    'ngm1': seg_ngm,
                    }
        print('designing stage 2')
        # _design_stage2 fills in the stage-2 entries of seg_dict
        # ('tail2'/'tailcm'/'diode2'/'ngm2'/'ref') and returns compensation
        # values and AC metrics.
        stage2_results = self._design_stage2(gm_db, load_db, vtail_list, vg_list, vmid_list,
                                             vout_list, vbias_list, vb_gm, vb_load, cload, cpar1,
                                             w_dict, th_dict, stack_dict, seg_dict, gm2_list,
                                             res_var, phase_margin, f_unit, max_ref_ratio)
        # scale2: stage-2 / stage-1 size ratio; scaler: reference-branch ratio
        scale2 = seg_dict['diode2'] / seg_dict['diode1']
        scaler = seg_dict['ref'] / seg_dict['tail1']
        # total supply current per corner from the sizing ratios
        itot_list = [(2 * (1 + scale2) + scaler) * itarg for itarg in itarg_list]
        layout_info = dict(
            w_dict=w_dict,
            th_dict=th_dict,
            stack_dict=stack_dict,
            seg_dict=seg_dict,
        )
        self._amp_info = dict(
            i1_size=i1_size,
            scale2=scale2,
            scaler=scaler,
            vtail=vtail_list,
            vmid=vmid_list,
            vbias=vbias_list,
            itot=itot_list,
            vstar=gm_info['vstar'],
            cin=gm_info['cgg'],
            gm1=gm1_list,
            gds1=gds1_list,
            gain1=gain1_list,
            rfb=stage2_results['rz'],
            cfb=stage2_results['cf'],
            gain_tot=stage2_results['gain'],
            f_3db=stage2_results['f_3db'],
            f_unit=stage2_results['f_unity'],
            phase_margin=stage2_results['phase_margin'],
            layout_info=layout_info,
        )
        print('done')
def get_dsn_info(self):
# type: () -> Optional[Dict[str, Any]]
return self._amp_info
def get_specs_verification(self, top_specs):
# type: (Dict[str, Any]) -> Dict[str, Any]
top_specs = deepcopy(top_specs)
dsn_specs = top_specs['dsn_specs']
ibias = dsn_specs['i1_unit'][0] * self._amp_info['i1_size'] * self._amp_info['scaler']
vdd = dsn_specs['vdd']
vindc = dsn_specs['vg_list'][0]
voutdc = dsn_specs['vout_list'][0]
f_unit = dsn_specs['f_unit']
gain_max = max(self._amp_info['gain_tot'])
f_bw_log = int(np.floor(np.log10(f_unit / gain_max)))
f_unit_log = int(np.ceil(np.log10(f_unit)))
top_specs['layout_params'].update(self._amp_info['layout_info'])
meas = top_specs['measurements'][0]
meas['cfb'] = self._amp_info['cfb']
meas['rfb'] = self._amp_info['rfb']
ac_tb = meas['testbenches']['ac']
ac_tb['fstart'] = 10 ** (f_bw_log - 1)
ac_tb['fstop'] = 10 ** (f_unit_log + 1)
ac_sim_vars = ac_tb['sim_vars']
ac_sim_vars['vdd'] = vdd
ac_sim_vars['cload'] = dsn_specs['cload']
ac_sim_vars['vincm'] = vindc
ac_sim_vars['voutcm'] = voutdc
ac_sim_vars['ibias'] = ibias
ac_sim_vars['vdd'] = vdd
ac_sim_vars['vinac'] = 1.0
ac_sim_vars['vindc'] = 0.0
"""
top_specs['tb_dc']['tb_params']['vimax'] = vdd
top_specs['tb_dc']['tb_params']['vimin'] = -vdd
top_specs['tb_dc']['tb_params']['vindc'] = vindc
top_specs['tb_dc']['tb_params']['voutcm'] = voutdc
top_specs['tb_dc']['tb_params']['ibias'] = ibias
top_specs['tb_dc']['tb_params']['vdd'] = vdd
top_specs['tb_dc']['tb_params']['voutref'] = voutdc
top_specs['tb_dc']['tb_params']['vout_start'] = -vdd + 0.15
top_specs['tb_dc']['tb_params']['vout_stop'] = vdd - 0.15
"""
return top_specs
    def _design_stage2(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
                       vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
                       gm2_list, res_var, phase_margin, f_unit, max_ref_ratio):
        """Size the second stage, bias reference, and compensation network.

        Binary-searches the smallest stage-2 unit size for which the
        amplifier can be stabilized, then refines the size with
        ``minimize_cost_golden`` against the ``f_unit`` unity-gain target.
        Mutates ``seg_dict`` in place, adding the 'tail2', 'tailcm',
        'diode2', 'ngm2', and 'ref' segment counts.

        Returns
        -------
        results : Dict[str, Any]
            dictionary with 'rz' (compensation resistance), 'cf'
            (compensation cap), and per-corner 'gain', 'f_3db', 'f_unity',
            and 'phase_margin' lists.

        Raises
        ------
        ValueError
            if the stage-1 segment counts are not all even.
        StageOneCurrentError
            if the unity-gain frequency target cannot be met at any size.
        """
        seg_tail1 = seg_dict['tail1']
        seg_diode1 = seg_dict['diode1']
        seg_ngm1 = seg_dict['ngm1']
        # step 1: find stage 2 unit size
        seg_gcd = gcd(gcd(seg_tail1, seg_diode1), seg_ngm1)
        if seg_gcd % 2 != 0:
            raise ValueError('All segment numbers must be even.')
        # divide seg_gcd by 2 to make sure all generated segment numbers are even
        seg_gcd //= 2
        # make sure we have enough tail fingers for common mode feedback
        min_size = 2 if seg_tail1 // seg_gcd == 2 else 1
        def ac_results_fun(cur_size):
            # Scale the stage-2 segment counts proportionally to cur_size
            # (mutating seg_dict), then design the compensation across
            # corners.  Returns the tuple from _find_rz_cf.
            seg_dict['tail2'] = seg_tail1 // seg_gcd * cur_size
            seg_dict['diode2'] = seg_diode1 // seg_gcd * cur_size
            seg_dict['ngm2'] = seg_ngm1 // seg_gcd * cur_size
            cur_scale2 = cur_size / seg_gcd
            cur_gm2_list = [gm2 * cur_scale2 for gm2 in gm2_list]
            ac_results = self._find_rz_cf(gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list,
                                          vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict,
                                          stack_dict, seg_dict, cur_gm2_list, res_var, phase_margin)
            return ac_results
        def funity_fun(cur_size):
            # Cost function: worst-corner unity-gain frequency, or -1 when
            # the amplifier cannot be stabilized at this size.
            ac_results_tmp = ac_results_fun(cur_size)
            fu_list = ac_results_tmp[0]
            if fu_list is None:
                return -1
            # noinspection PyTypeChecker
            ans = min(fu_list)
            return ans
        # find min_size such that amplifier is stable
        min_bin_iter = BinaryIterator(min_size, None)
        while min_bin_iter.has_next():
            test_size = min_bin_iter.get_next()
            test_fu = funity_fun(test_size)
            if test_fu >= 0:
                min_bin_iter.save()
                min_bin_iter.down()
            else:
                min_bin_iter.up()
        # optimize unity-gain bandwidth against f_unit, starting from the
        # smallest stable size found above.
        min_result = minimize_cost_golden(funity_fun, f_unit, offset=min_bin_iter.get_last_save())
        if min_result.x is None:
            msg = 'Insufficient stage 1 current.  funity_max=%.4g'
            raise StageOneCurrentError(msg % min_result.vmax)
        # re-run at the optimal size to get the final compensation/AC values
        funity_list, rz_nom, cf_min, gain_list, f3db_list, pm_list = ac_results_fun(min_result.x)
        # split the stage-2 tail segments between the differential tail
        # (forced even) and the common-mode feedback device.
        seg_tail2_tot = seg_dict['tail2']
        seg_tail2 = (seg_tail2_tot // 4) * 2
        seg_tailcm = seg_tail2_tot - seg_tail2
        seg_tail_tot = 2 * (seg_dict['tail1'] + seg_tail2)
        seg_dict['tail2'] = seg_tail2
        seg_dict['tailcm'] = seg_tailcm
        # reference segments: ceil(seg_tail_tot / max_ref_ratio) rounded up
        # to an even count, with a floor of 2.
        seg_dict['ref'] = max(2, -((-seg_tail_tot // max_ref_ratio) // 2) * 2)
        return dict(
            rz=rz_nom,
            cf=cf_min,
            gain=gain_list,
            f_3db=f3db_list,
            f_unity=funity_list,
            phase_margin=pm_list,
        )
@classmethod
def _get_stage2_ss(cls, gm2_list, gds2_list, c2_list, cg2_list, cload, seg_gcd, cur_size):
cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list = [], [], [], []
for gm2, gds2, c2, cg2 in zip(gm2_list, gds2_list, c2_list, cg2_list):
cur_gm2_list.append(gm2 * cur_size / seg_gcd)
cur_gds2_list.append(gds2 * cur_size / seg_gcd)
cur_c2_list.append(cload + c2 * cur_size / seg_gcd)
cur_cg2_list.append(cg2 * cur_size / seg_gcd)
return cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list
    def _find_rz_cf(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list,
                    vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
                    gm2_list, res_var, phase_margin, cap_tol=1e-15, cap_step=10e-15, cap_min=1e-15,
                    cap_max=1e-9):
        """Find the minimum Miller cap that stabilizes the system.

        For each corner, binary-searches the smallest feedback cap meeting
        ``phase_margin`` with the worst-case zero conductance; the result of
        one corner seeds the next, so the final ``cf_min`` satisfies all
        corners.  It then re-evaluates gain, bandwidth, unity-gain frequency,
        and phase margin at each corner with the nominal zero conductance.

        Returns ``(funity_list, rz_nom, cf_min, gain_list, f3db_list,
        pm_list)``, or six ``None`` values if the cap search exceeds
        ``cap_max`` without stabilizing the amplifier.

        NOTE: This function assumes the phase of the system will not wrap
        past 360 degrees for any Miller cap value, otherwise it may get the
        phase margin wrong.  This assumption should be valid for this op amp.
        """
        # worst-case feedback zero conductance: smallest stage-2 gm across
        # corners; the nominal resistor is derated by the resistor variation.
        gz_worst = float(min(gm2_list))
        gz_nom = gz_worst * (1 - res_var)
        # find maximum Cf needed to stabilize all corners
        cf_min = cap_min
        for env_idx, (vtail, vg, vmid, vout, vbias) in \
                enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
            cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
                                     vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
                                     gz_worst)
            bin_iter = FloatBinaryIterator(cf_min, None, cap_tol, search_step=cap_step)
            while bin_iter.has_next():
                cur_cf = bin_iter.get_next()
                # temporarily insert the trial compensation caps
                cir.add_cap(cur_cf, 'outp', 'xp')
                cir.add_cap(cur_cf, 'outn', 'xn')
                num, den = cir.get_num_den('in', 'out')
                cur_pm, _ = get_stability_margins(num, den)
                if cur_pm < phase_margin:
                    if cur_cf > cap_max:
                        # no way to make amplifier stable, just return
                        return None, None, None, None, None, None
                    bin_iter.up()
                else:
                    bin_iter.save()
                    bin_iter.down()
                # remove the trial caps (add the negative value back)
                cir.add_cap(-cur_cf, 'outp', 'xp')
                cir.add_cap(-cur_cf, 'outn', 'xn')
            # bin_iter is guaranteed to save at least one value, so don't need to worry about
            # cf_min being None
            cf_min = bin_iter.get_last_save()
        # find gain, unity gain bandwidth, and phase margin across corners
        gain_list, f3db_list, funity_list, pm_list = [], [], [], []
        for env_idx, (vtail, vg, vmid, vout, vbias) in \
                enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)):
            cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm,
                                     vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict,
                                     gz_nom)
            cir.add_cap(cf_min, 'outp', 'xp')
            cir.add_cap(cf_min, 'outn', 'xn')
            num, den = cir.get_num_den('in', 'out')
            # DC gain = |num(0) / den(0)|; frequencies converted rad/s -> Hz
            pn = np.poly1d(num)
            pd = np.poly1d(den)
            gain_list.append(abs(pn(0) / pd(0)))
            f3db_list.append(get_w_3db(num, den) / 2 / np.pi)
            funity_list.append(get_w_crossings(num, den)[0] / 2 / np.pi)
            pm_list.append(get_stability_margins(num, den)[0])
        return funity_list, 1 / gz_nom, cf_min, gain_list, f3db_list, pm_list
    @classmethod
    def _make_circuit(cls, env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load,
                      cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz, neg_cap=False,
                      no_fb=False):
        """Build the small-signal LTICircuit model of the amplifier at one corner.

        Queries the transistor databases at the given bias voltages,
        instantiates both differential stages, adds parasitic and load caps,
        the feedback conductances ``gz`` (unless ``no_fb``), and a
        differential-to-single-ended conversion from node 'in' to node 'out'
        for transfer-function extraction.

        Parameters
        ----------
        env_idx : int
            index into ``gm_db.env_list`` selecting the simulation corner.
        gz : float
            feedback (compensation) conductance between the 'x' nodes and the
            stage-1 outputs.
        neg_cap : bool
            forwarded to every ``add_transistor`` call; semantics defined by
            LTICircuit (not visible here).
        no_fb : bool
            if True, omit the feedback conductances.

        Returns
        -------
        cir : LTICircuit
            the constructed small-signal circuit.
        """
        cur_env = gm_db.env_list[env_idx]
        # query each device at its operating point (voltages are referenced
        # to the corresponding body/source potentials).
        gm_db.set_dsn_params(w=w_dict['tail'], intent=th_dict['tail'], stack=stack_dict['tail'])
        tail1_params = gm_db.query(env=cur_env, vbs=0, vds=vtail - vb_gm, vgs=vbias - vb_gm)
        tail2_params = gm_db.query(env=cur_env, vbs=0, vds=vout - vb_gm, vgs=vbias - vb_gm)
        gm_db.set_dsn_params(w=w_dict['in'], intent=th_dict['in'], stack=stack_dict['in'])
        gm1_params = gm_db.query(env=cur_env, vbs=vb_gm - vtail, vds=vmid - vtail, vgs=vg - vtail)
        load_db.set_dsn_params(w=w_dict['load'], intent=th_dict['load'], stack=stack_dict['diode'])
        diode1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
        diode2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
        load_db.set_dsn_params(stack=stack_dict['ngm'])
        ngm1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load)
        ngm2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load)
        cir = LTICircuit()
        # stage 1: tail, input pair, diode loads, and cross-coupled ngm loads
        cir.add_transistor(tail1_params, 'tail', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail1'],
                           neg_cap=neg_cap)
        cir.add_transistor(gm1_params, 'midp', 'inn', 'tail', 'gnd', fg=seg_dict['in'],
                           neg_cap=neg_cap)
        cir.add_transistor(gm1_params, 'midn', 'inp', 'tail', 'gnd', fg=seg_dict['in'],
                           neg_cap=neg_cap)
        cir.add_transistor(diode1_params, 'midp', 'midp', 'gnd', 'gnd', fg=seg_dict['diode1'],
                           neg_cap=neg_cap)
        cir.add_transistor(diode1_params, 'midn', 'midn', 'gnd', 'gnd', fg=seg_dict['diode1'],
                           neg_cap=neg_cap)
        cir.add_transistor(ngm1_params, 'midn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm1'],
                           neg_cap=neg_cap)
        cir.add_transistor(ngm1_params, 'midp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm1'],
                           neg_cap=neg_cap)
        # stage 2: tail devices and gm devices driven from the stage-1 outputs
        cir.add_transistor(tail2_params, 'outp', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
                           neg_cap=neg_cap)
        cir.add_transistor(tail2_params, 'outn', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'],
                           neg_cap=neg_cap)
        cir.add_transistor(diode2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['diode2'],
                           neg_cap=neg_cap)
        cir.add_transistor(diode2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['diode2'],
                           neg_cap=neg_cap)
        cir.add_transistor(ngm2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm2'],
                           neg_cap=neg_cap)
        cir.add_transistor(ngm2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm2'],
                           neg_cap=neg_cap)
        # parasitic cap
        cir.add_cap(cpar1, 'midp', 'gnd')
        cir.add_cap(cpar1, 'midn', 'gnd')
        # load cap
        cir.add_cap(cload, 'outp', 'gnd')
        cir.add_cap(cload, 'outn', 'gnd')
        # feedback resistors
        if not no_fb:
            cir.add_conductance(gz, 'xp', 'midn')
            cir.add_conductance(gz, 'xn', 'midp')
        # diff-to-single conversion
        cir.add_vcvs(0.5, 'inp', 'gnd', 'in', 'gnd')
        cir.add_vcvs(-0.5, 'inn', 'gnd', 'in', 'gnd')
        cir.add_vcvs(1, 'out', 'gnd', 'outp', 'outn')
        return cir
class OpAmpTwoStageChar(MeasurementManager):
    """Measurement manager that characterizes the two-stage op amp with AC testbenches.

    Runs an 'ac0' state that sweeps the feedback cap to find the minimum
    value meeting the phase-margin spec (when the 'find_cfb' spec is enabled,
    the default), followed by an 'ac1' state that measures gain, bandwidth,
    unity-gain frequency, and phase margin with the chosen compensation.
    """
    def __init__(self,
                 data_dir,  # type: str
                 meas_name,  # type: str
                 impl_lib,  # type: str
                 specs,  # type: Dict[str, Any]
                 wrapper_lookup,  # type: Dict[str, str]
                 sim_view_list,  # type: Sequence[Tuple[str, str]]
                 env_list,  # type: Sequence[str]
                 ):
        # straight pass-through to the MeasurementManager constructor
        MeasurementManager.__init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup,
                                    sim_view_list, env_list)
    def get_initial_state(self):
        # type: () -> str
        """Returns the initial FSM state."""
        return 'ac0'
    def get_testbench_info(self, state, prev_output):
        """Return the AC testbench info for the given FSM state.

        In 'ac0' with 'find_cfb' enabled, sweeps cfb over
        ``[cfb0 * cmin_scale, cfb0 * cmax_scale]`` (num_pts points) with the
        feedback resistor derated by res_var; otherwise uses the nominal rfb
        and either the cfb found in 'ac0' or the spec value.
        """
        rfb0 = self.specs['rfb']
        cfb0 = self.specs['cfb']
        find_cfb = self.specs.get('find_cfb', True)
        res_var = self.specs['res_var']
        cmin_scale = self.specs['cmin_scale']
        cmax_scale = self.specs['cmax_scale']
        num_pts = self.specs['num_pts']
        tmp = super(OpAmpTwoStageChar, self).get_testbench_info('ac', prev_output)
        tb_name, tb_type, tb_specs, tb_params = tmp
        if state == 'ac0' and find_cfb:
            cfb_list = np.linspace(cfb0 * cmin_scale, cfb0 * cmax_scale, num_pts).tolist()
            # worst-case (derated) feedback resistor for the cfb search
            tb_specs['sim_vars']['rfb'] = rfb0 * (1 - res_var)
            tb_specs['sim_vars']['cfb'] = cfb_list
        else:
            if find_cfb:
                cfb = self.get_state_output('ac0')['cfb']
            else:
                cfb = cfb0
            tb_specs['sim_vars']['rfb'] = rfb0
            tb_specs['sim_vars']['cfb'] = cfb
        return tb_name, tb_type, tb_specs, tb_params
    def process_output(self, state, data, tb_manager):
        # type: (str, Dict[str, Any], ACTB) -> Tuple[bool, str, Dict[str, Any]]
        """Process testbench data for the given state.

        In 'ac0' (with 'find_cfb' set) extracts the minimum feedback cap
        meeting the phase-margin spec and transitions to 'ac1'; otherwise
        reports per-corner gain, bandwidth, unity-gain frequency, and phase
        margin and finishes the FSM.
        """
        phase_margin = self.specs['phase_margin']
        find_cfb = self.specs.get('find_cfb', True)
        output_list = ['vout']
        results = tb_manager.get_ugb_and_pm(data, output_list)
        if state == 'ac0' and find_cfb:
            done = False
            next_state = 'ac1'
            cfb = self._find_min_cfb(phase_margin, results)
            output = dict(cfb=cfb)
        else:
            done = True
            next_state = ''
            if find_cfb:
                cfb = self.get_state_output('ac0')['cfb']
            else:
                cfb = self.specs['cfb']
            gain_results = tb_manager.get_gain_and_w3db(data, output_list, output_dict=results)
            corner_list = results['corner'].tolist()
            gain_list = gain_results['gain_vout'].tolist()
            bw_list = gain_results['w3db_vout'].tolist()
            funity_list = results['funity_vout'].tolist()
            pm_list = results['pm_vout'].tolist()
            output = dict(cfb=cfb, corners=corner_list, gain=gain_list, bw=bw_list,
                          funity=funity_list, pm=pm_list)
        return done, next_state, output
    @classmethod
    def _find_min_cfb(cls, phase_margin, results):
        """Return the smallest swept cfb value that meets the phase margin at every corner.

        Binary-searches the cfb sweep axis per corner; the minimum index
        found for one corner seeds the search for the next, so the final
        value satisfies all corners.  NOTE: the binary search assumes phase
        margin is monotonic in cfb over the swept range.

        Raises
        ------
        ValueError
            if no swept cfb value achieves the phase margin.
        """
        axis_names = ['corner', 'cfb']
        corner_list = results['corner']
        corner_sort_arg = np.argsort(corner_list)  # type: Sequence[int]
        # rearrange array axis so indexing is [corner, cfb]
        sweep_vars = results['sweep_params']['pm_vout']
        order = [sweep_vars.index(name) for name in axis_names]
        pm_data = np.transpose(results['pm_vout'], axes=order)
        # determine minimum cfb
        cfb_vec = results['cfb']
        cfb_idx_min = 0
        for corner_idx in corner_sort_arg:
            bin_iter = BinaryIterator(cfb_idx_min, cfb_vec.size)
            while bin_iter.has_next():
                cur_cfb_idx = bin_iter.get_next()
                pm = pm_data[corner_idx, cur_cfb_idx]
                if pm >= phase_margin:
                    bin_iter.save()
                    bin_iter.down()
                else:
                    bin_iter.up()
            cfb_idx_min = bin_iter.get_last_save()
            if cfb_idx_min is None:
                # No solution; cannot make amplifier stable
                break
        if cfb_idx_min is None:
            raise ValueError('Cannot determine cfb.')
        else:
            cfb = cfb_vec[cfb_idx_min]
        return cfb.item()
| [((6057, 6090), 'bag.util.search.BinaryIterator', 'BinaryIterator', (['i1_min_size', 'None'], {}), '(i1_min_size, None)\n', (6071, 6090), False, 'from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden\n'), ((15101, 15120), 'copy.deepcopy', 'deepcopy', (['top_specs'], {}), '(top_specs)\n', (15109, 15120), False, 'from copy import deepcopy\n'), ((18697, 18727), 'bag.util.search.BinaryIterator', 'BinaryIterator', (['min_size', 'None'], {}), '(min_size, None)\n', (18711, 18727), False, 'from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden\n'), ((25071, 25083), 'bag.data.lti.LTICircuit', 'LTICircuit', ([], {}), '()\n', (25081, 25083), False, 'from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db\n'), ((27910, 28026), 'bag.simulation.core.MeasurementManager.__init__', 'MeasurementManager.__init__', (['self', 'data_dir', 'meas_name', 'impl_lib', 'specs', 'wrapper_lookup', 'sim_view_list', 'env_list'], {}), '(self, data_dir, meas_name, impl_lib, specs,\n wrapper_lookup, sim_view_list, env_list)\n', (27937, 28026), False, 'from bag.simulation.core import MeasurementManager\n'), ((30664, 30687), 'numpy.argsort', 'np.argsort', (['corner_list'], {}), '(corner_list)\n', (30674, 30687), True, 'import numpy as np\n'), ((30881, 30925), 'numpy.transpose', 'np.transpose', (["results['pm_vout']"], {'axes': 'order'}), "(results['pm_vout'], axes=order)\n", (30893, 30925), True, 'import numpy as np\n'), ((3302, 3341), 'scipy.optimize.brentq', 'sciopt.brentq', (['zero_fun', 'vg_min', 'vg_max'], {}), '(zero_fun, vg_min, vg_max)\n', (3315, 3341), True, 'import scipy.optimize as sciopt\n'), ((17293, 17319), 'bag.math.gcd', 'gcd', (['seg_tail1', 'seg_diode1'], {}), '(seg_tail1, seg_diode1)\n', (17296, 17319), False, 'from bag.math import gcd\n'), ((21698, 21762), 'bag.util.search.FloatBinaryIterator', 'FloatBinaryIterator', (['cf_min', 'None', 'cap_tol'], {'search_step': 'cap_step'}), '(cf_min, 
None, cap_tol, search_step=cap_step)\n', (21717, 21762), False, 'from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden\n'), ((23378, 23392), 'numpy.poly1d', 'np.poly1d', (['num'], {}), '(num)\n', (23387, 23392), True, 'import numpy as np\n'), ((23410, 23424), 'numpy.poly1d', 'np.poly1d', (['den'], {}), '(den)\n', (23419, 23424), True, 'import numpy as np\n'), ((31082, 31123), 'bag.util.search.BinaryIterator', 'BinaryIterator', (['cfb_idx_min', 'cfb_vec.size'], {}), '(cfb_idx_min, cfb_vec.size)\n', (31096, 31123), False, 'from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden\n'), ((7384, 7452), 'numpy.floor', 'np.floor', (["(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test))"], {}), "(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test))\n", (7392, 7452), True, 'import numpy as np\n'), ((8460, 8506), 'bag.util.search.BinaryIterator', 'BinaryIterator', (['(last_i1_size + 1)', 'i1_size_test'], {}), '(last_i1_size + 1, i1_size_test)\n', (8474, 8506), False, 'from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden\n'), ((15494, 15521), 'numpy.log10', 'np.log10', (['(f_unit / gain_max)'], {}), '(f_unit / gain_max)\n', (15502, 15521), True, 'import numpy as np\n'), ((15557, 15573), 'numpy.log10', 'np.log10', (['f_unit'], {}), '(f_unit)\n', (15565, 15573), True, 'import numpy as np\n'), ((22031, 22062), 'bag.data.lti.get_stability_margins', 'get_stability_margins', (['num', 'den'], {}), '(num, den)\n', (22052, 22062), False, 'from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db\n'), ((23636, 23667), 'bag.data.lti.get_stability_margins', 'get_stability_margins', (['num', 'den'], {}), '(num, den)\n', (23657, 23667), False, 'from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db\n'), ((28730, 28788), 'numpy.linspace', 'np.linspace', (['(cfb0 * cmin_scale)', '(cfb0 * cmax_scale)', 'num_pts'], {}), '(cfb0 
* cmin_scale, cfb0 * cmax_scale, num_pts)\n', (28741, 28788), True, 'import numpy as np\n'), ((23503, 23522), 'bag.data.lti.get_w_3db', 'get_w_3db', (['num', 'den'], {}), '(num, den)\n', (23512, 23522), False, 'from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db\n'), ((23567, 23592), 'bag.data.lti.get_w_crossings', 'get_w_crossings', (['num', 'den'], {}), '(num, den)\n', (23582, 23592), False, 'from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db\n')] |
Subsets and Splits