the-stack_106_19489
|
from PyQt5 import QtCore, QtGui, QtWidgets
from morphine.globals import __enviroments__, __intended_audience__, __programming_lang__, __license__, find_packages
import os
from yapf.yapflib.yapf_api import FormatCode
from morphine.template import __minimal_deps__ as template
class Ui_minimal_deps(object):
def __init__(self, data):
self.data = data
pass
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(840, 490)
MainWindow.setMinimumSize(QtCore.QSize(840, 490))
MainWindow.setMaximumSize(QtCore.QSize(840, 490))
self.obj = MainWindow
resolution = QtWidgets.QDesktopWidget().screenGeometry()
        MainWindow.move(
            (resolution.width() // 2) - (MainWindow.frameSize().width() // 2),
            (resolution.height() // 2) - (MainWindow.frameSize().height() // 2),
        )
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 111, 30))
self.label.setObjectName("label")
self.shortDesc = QtWidgets.QLineEdit(self.centralwidget)
self.shortDesc.setGeometry(QtCore.QRect(130, 10, 701, 30))
self.shortDesc.setObjectName("shortDesc")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 50, 111, 30))
self.label_2.setObjectName("label_2")
self.longDesc = QtWidgets.QLineEdit(self.centralwidget)
self.longDesc.setGeometry(QtCore.QRect(130, 50, 661, 30))
self.longDesc.setObjectName("longDesc")
self.dependencies = QtWidgets.QLineEdit(self.centralwidget)
self.dependencies.setGeometry(QtCore.QRect(130, 90, 301, 30))
self.dependencies.setObjectName("dependencies")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 90, 111, 30))
self.label_3.setObjectName("label_3")
self.packages = QtWidgets.QLineEdit(self.centralwidget)
self.packages.setGeometry(QtCore.QRect(530, 90, 241, 30))
self.packages.setObjectName("packages")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(450, 90, 71, 30))
self.label_4.setObjectName("label_4")
self.keywords = QtWidgets.QLineEdit(self.centralwidget)
self.keywords.setGeometry(QtCore.QRect(530, 130, 300, 30))
self.keywords.setObjectName("keywords")
self.pyrec = QtWidgets.QLineEdit(self.centralwidget)
self.pyrec.setGeometry(QtCore.QRect(130, 130, 301, 30))
self.pyrec.setObjectName("pyrec")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(450, 130, 71, 30))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(10, 130, 111, 30))
self.label_6.setObjectName("label_6")
self.envList = QtWidgets.QComboBox(self.centralwidget)
self.envList.setGeometry(QtCore.QRect(130, 170, 220, 30))
self.envList.setObjectName("envList")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(10, 170, 111, 30))
self.label_7.setObjectName("label_7")
self.licList = QtWidgets.QComboBox(self.centralwidget)
self.licList.setGeometry(QtCore.QRect(530, 170, 221, 30))
self.licList.setObjectName("licList")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(450, 170, 71, 30))
self.label_8.setObjectName("label_8")
self.audList = QtWidgets.QComboBox(self.centralwidget)
self.audList.setGeometry(QtCore.QRect(530, 210, 221, 30))
self.audList.setObjectName("audList")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(450, 210, 71, 30))
self.label_9.setObjectName("label_9")
self.langList = QtWidgets.QComboBox(self.centralwidget)
self.langList.setGeometry(QtCore.QRect(130, 210, 221, 30))
self.langList.setObjectName("langList")
self.label_10 = QtWidgets.QLabel(self.centralwidget)
self.label_10.setGeometry(QtCore.QRect(10, 210, 111, 30))
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(10, 260, 81, 30))
self.label_11.setObjectName("label_11")
self.classifiers = QtWidgets.QPlainTextEdit(self.centralwidget)
self.classifiers.setGeometry(QtCore.QRect(130, 260, 701, 181))
self.classifiers.setReadOnly(True)
self.classifiers.setObjectName("classifiers")
self.browse_long_desc = QtWidgets.QPushButton(self.centralwidget)
self.browse_long_desc.setGeometry(QtCore.QRect(800, 50, 31, 30))
self.browse_long_desc.setObjectName("browse_long_desc")
self.find_pack = QtWidgets.QPushButton(self.centralwidget)
self.find_pack.setGeometry(QtCore.QRect(780, 90, 51, 30))
self.find_pack.setObjectName("find_pack")
self.licAdd = QtWidgets.QPushButton(self.centralwidget)
self.licAdd.setGeometry(QtCore.QRect(760, 170, 31, 30))
self.licAdd.setObjectName("licAdd")
self.licRem = QtWidgets.QPushButton(self.centralwidget)
self.licRem.setGeometry(QtCore.QRect(800, 170, 31, 30))
self.licRem.setObjectName("licRem")
self.langRem = QtWidgets.QPushButton(self.centralwidget)
self.langRem.setGeometry(QtCore.QRect(400, 210, 31, 30))
self.langRem.setObjectName("langRem")
self.langAdd = QtWidgets.QPushButton(self.centralwidget)
self.langAdd.setGeometry(QtCore.QRect(360, 210, 31, 30))
self.langAdd.setObjectName("langAdd")
self.build = QtWidgets.QPushButton(self.centralwidget)
self.build.setGeometry(QtCore.QRect(740, 450, 91, 30))
self.build.setObjectName("build")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(30, 450, 151, 30))
self.label_12.setStyleSheet("color: rgb(255, 0, 0);\n"
"font-style: italic;\n"
"text-decoration: underline;\n"
"font-size: 13px")
self.label_12.setObjectName("label_12")
self.audRem = QtWidgets.QPushButton(self.centralwidget)
self.audRem.setGeometry(QtCore.QRect(800, 210, 31, 30))
self.audRem.setObjectName("audRem")
self.audAdd = QtWidgets.QPushButton(self.centralwidget)
self.audAdd.setGeometry(QtCore.QRect(760, 210, 31, 30))
self.audAdd.setObjectName("audAdd")
self.envRem = QtWidgets.QPushButton(self.centralwidget)
self.envRem.setGeometry(QtCore.QRect(400, 170, 31, 30))
self.envRem.setObjectName("envRem")
self.envAdd = QtWidgets.QPushButton(self.centralwidget)
self.envAdd.setGeometry(QtCore.QRect(360, 170, 31, 30))
self.envAdd.setObjectName("envAdd")
MainWindow.setCentralWidget(self.centralwidget)
self.actionsss = QtWidgets.QAction(MainWindow)
self.actionsss.setObjectName("actionsss")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.shortDesc, self.longDesc)
MainWindow.setTabOrder(self.longDesc, self.browse_long_desc)
MainWindow.setTabOrder(self.browse_long_desc, self.dependencies)
MainWindow.setTabOrder(self.dependencies, self.packages)
MainWindow.setTabOrder(self.packages, self.find_pack)
MainWindow.setTabOrder(self.find_pack, self.pyrec)
MainWindow.setTabOrder(self.pyrec, self.keywords)
MainWindow.setTabOrder(self.keywords, self.envList)
MainWindow.setTabOrder(self.envList, self.envAdd)
MainWindow.setTabOrder(self.envAdd, self.envRem)
MainWindow.setTabOrder(self.envRem, self.licList)
MainWindow.setTabOrder(self.licList, self.licAdd)
MainWindow.setTabOrder(self.licAdd, self.licRem)
MainWindow.setTabOrder(self.licRem, self.langList)
MainWindow.setTabOrder(self.langList, self.langAdd)
MainWindow.setTabOrder(self.langAdd, self.langRem)
MainWindow.setTabOrder(self.langRem, self.audList)
MainWindow.setTabOrder(self.audList, self.audAdd)
MainWindow.setTabOrder(self.audAdd, self.audRem)
MainWindow.setTabOrder(self.audRem, self.classifiers)
MainWindow.setTabOrder(self.classifiers, self.build)
# making combo and initial
self.envList.addItems(__enviroments__)
self.audList.addItems(__intended_audience__)
self.licList.addItems(__license__)
self.langList.addItems(__programming_lang__)
self.classifiers.appendPlainText(self.data.dev_status)
# binding buttons
self.build.clicked.connect(self.builder)
self.find_pack.clicked.connect(self.findall)
self.browse_long_desc.clicked.connect(self.browse)
self.audAdd.clicked.connect(self.addAud)
self.audRem.clicked.connect(self.remAud)
self.envAdd.clicked.connect(self.addEnv)
self.envRem.clicked.connect(self.remEnv)
self.langAdd.clicked.connect(self.addLang)
self.langRem.clicked.connect(self.remLang)
self.licAdd.clicked.connect(self.addLic)
self.licRem.clicked.connect(self.remLic)
self.shortDesc.setPlaceholderText("Enter short description")
self.longDesc.setPlaceholderText(
"Enter long description file path or browse. (Markdown file required)"
)
self.keywords.setPlaceholderText("Enter keywords for SEO")
self.dependencies.setPlaceholderText(
"Enter dependencies (separate by comma)")
self.pyrec.setPlaceholderText("Enter python version requirements")
self.packages.setPlaceholderText(
"Enter python packages to be included or click Find button")
pass
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(
_translate("MainWindow",
"Morphine :: Minimal + Dependencies :: Maker"))
self.label.setText(_translate("MainWindow", "Short Description"))
self.label_2.setText(_translate("MainWindow", "Long Description"))
        self.label_3.setText(_translate("MainWindow", "Dependencies"))
self.label_4.setText(_translate("MainWindow", "Packages"))
self.label_5.setText(_translate("MainWindow", "Keywords"))
self.label_6.setText(_translate("MainWindow", "Python Required"))
        self.label_7.setText(_translate("MainWindow", "Environment"))
self.label_8.setText(_translate("MainWindow", "License"))
self.label_9.setText(_translate("MainWindow", "Audience"))
self.label_10.setText(_translate("MainWindow", "Prog. Language"))
self.label_11.setText(_translate("MainWindow", "Classifiers"))
self.browse_long_desc.setText(_translate("MainWindow", "..."))
self.find_pack.setText(_translate("MainWindow", "Find"))
self.licAdd.setText(_translate("MainWindow", "+"))
self.licRem.setText(_translate("MainWindow", "-"))
self.langRem.setText(_translate("MainWindow", "-"))
self.langAdd.setText(_translate("MainWindow", "+"))
self.build.setText(_translate("MainWindow", "&Build"))
self.label_12.setText(
_translate("MainWindow", "All fields are mandatory"))
self.audRem.setText(_translate("MainWindow", "-"))
self.audAdd.setText(_translate("MainWindow", "+"))
self.envRem.setText(_translate("MainWindow", "-"))
self.envAdd.setText(_translate("MainWindow", "+"))
self.shortDesc.setFocus()
pass
def browse(self):
file = str(
QtWidgets.QFileDialog.getOpenFileName(
self.obj, "Select Long Description", ".",
"Markdown Files (*MD *md)")[0])
self.longDesc.setText(file)
pass
def findall(self):
self.packages.setText(", ".join(find_packages()))
def addAud(self):
current = "Intended Audience :: " + self.audList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remAud(self):
current = "Intended Audience :: " + self.audList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addLang(self):
current = "Programming Language :: " + self.langList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remLang(self):
current = "Programming Language :: " + self.langList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addEnv(self):
current = "Environment :: " + self.envList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remEnv(self):
current = "Environment :: " + self.envList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def addLic(self):
current = "License :: " + self.licList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
if current in classifier:
return None
classifier.append(current)
self.classifiers.setPlainText("\n".join(classifier))
pass
def remLic(self):
current = "License :: " + self.licList.currentText()
classifier = self.classifiers.toPlainText().split("\n")
try:
classifier.remove(current)
except ValueError:
pass
self.classifiers.setPlainText("\n".join(classifier))
pass
def builder(self):
shortDesc = self.shortDesc.text()
if shortDesc == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Field",
"Short description is empty")
self.shortDesc.setFocus()
return None
longDesc = self.longDesc.text()
if longDesc == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Long description is empty")
            self.longDesc.setFocus()
return None
elif not os.path.exists(longDesc):
QtWidgets.QMessageBox.warning(self.obj, "Not Exitsts",
"Long description file not exists")
self.longDesc.setFocus()
return None
packages = self.packages.text()
if packages == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Packages are empty")
self.packages.setFocus()
return None
else:
packages = [x.strip() for x in packages.split(",")]
keywords = self.keywords.text()
if keywords == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Keywords are empty")
self.keywords.setFocus()
return None
pyreq = self.pyrec.text()
if pyreq == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Python version required is empty")
self.pyrec.setFocus()
return None
deps = self.dependencies.text()
if deps == "":
QtWidgets.QMessageBox.warning(self.obj, "Empty Fields",
"Python version required is empty")
self.dependencies.setFocus()
return None
else:
deps = [x.strip() for x in deps.split(",")]
classifiers = self.classifiers.toPlainText().split("\n")
setup = FormatCode(
template.format(
name=self.data.name,
packages=packages,
version=self.data.version,
auth_name=self.data.authorname,
auth_email=self.data.authoremail,
home_url=self.data.home_url,
down_url=self.data.down_url,
short_desc=shortDesc,
long_desc=longDesc,
license=self.data.license,
keywords=keywords,
classifiers=classifiers,
python_required=pyreq,
install_requires=deps),
style_config="pep8")[0]
        with open(os.path.join(self.data.dir, "setup.py"), "w") as file:
            file.write(setup)
        QtWidgets.QMessageBox.information(
            self.obj, "Done", "Hooray ^_^\nSetup file has been created")
pass
|
the-stack_106_19490
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DeleteIpWhiteItemRequest(JDCloudRequest):
"""
删除一条IP白名单记录
"""
def __init__(self, parameters, header=None, version="v1"):
super(DeleteIpWhiteItemRequest, self).__init__(
'/regions/{regionId}/instances/{insId}/ipwhitelist', 'DELETE', header, version)
self.parameters = parameters
class DeleteIpWhiteItemParameters(object):
def __init__(self, regionId, insId, cidr):
"""
:param regionId: 地域 Id
:param insId: 审计实例ID
:param cidr: IP白名单记录,支持掩码
"""
self.regionId = regionId
self.insId = insId
self.cidr = cidr
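# Illustrative usage sketch (not part of the auto-generated SDK file); the
# region, instance id and CIDR values below are hypothetical placeholders:
#
#   parameters = DeleteIpWhiteItemParameters('cn-north-1', 'audit-ins-xxxx',
#                                            '192.168.0.0/24')
#   request = DeleteIpWhiteItemRequest(parameters)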
|
the-stack_106_19491
|
import logging
from typing import Tuple, Optional
from flask import current_app
import redis
from redis import Redis
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from backend.database.objects import DBObjectBase
logger = logging.getLogger(__name__)
def login(connection_string, recreate_database=False) -> Tuple[create_engine, sessionmaker]:
print(connection_string)
engine = create_engine(connection_string, echo=False)
if recreate_database:
conn = engine.connect()
conn.execute("commit")
conn.execute("create database saltie")
conn.close()
engine = create_engine(connection_string + '/saltie', echo=False)
DBObjectBase.metadata.create_all(engine)
session = sessionmaker(bind=engine)
return engine, session
def startup() -> sessionmaker:
try:
# Sql Stuff
connection_string = 'postgresql:///saltie'
engine, session = login(connection_string)
except OperationalError as e:
print('trying backup info', e)
try:
engine, session = login('postgresql://postgres:postgres@localhost/saltie')
except Exception as e:
engine, session = login('postgresql://postgres:postgres@localhost', recreate_database=True)
return session
def get_current_session():
return EngineStartup.get_current_session()
stored_session: sessionmaker = None
stored_redis = None
redis_attempted = False
def lazy_startup():
global stored_session
if stored_session is not None:
return stored_session
stored_session = EngineStartup.startup()
return stored_session
def lazy_get_redis():
global stored_redis
global redis_attempted
if stored_redis is not None or redis_attempted:
return stored_redis
stored_redis = EngineStartup.get_redis()
redis_attempted = True
return stored_redis
def get_strict_redis():
return EngineStartup.get_strict_redis()
# session getting
class EngineStartup:
@staticmethod
def login_db() -> Tuple[any, sessionmaker]:
try:
# Sql Stuff
connection_string = 'postgresql:///saltie'
engine, session = login(connection_string)
except OperationalError as e:
print('trying backup info', e)
try:
engine, session = login('postgresql://postgres:postgres@localhost/saltie')
except Exception as e:
engine, session = login('postgresql://postgres:postgres@localhost', recreate_database=True)
return engine, session
@staticmethod
def startup() -> sessionmaker:
_, session = EngineStartup.login_db()
return session
@staticmethod
def get_redis() -> Optional[Redis]:
try:
_redis = Redis(
host='localhost',
port=6379)
_redis.get('test') # Make Redis try to actually use the connection, to generate error if not connected.
return _redis
except: # TODO: Investigate and specify this except.
logger.error("Not using redis.")
return None
@staticmethod
def get_strict_redis():
return redis.StrictRedis()
@staticmethod
def get_current_session():
try:
return current_app.config['db']()
except:
_session = lazy_startup()
return _session()
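if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); assumes a local
    # PostgreSQL instance reachable through the connection strings above.
    session_factory = lazy_startup()   # sessionmaker bound to the engine
    session = session_factory()        # open an actual Session
    session.close()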
|
the-stack_106_19493
|
"""This config file is just a dictionnary of useful values."""
# Author: Juju
import os
import warnings
from pathlib import Path
import dotmap
PATHS = dotmap.DotMap()
DATA = dotmap.DotMap()
OS = os.name
PATHS.lib = str(Path(os.path.abspath(__file__)).parent.parent)
# replace ~ by the full path name
PATHS.home = os.path.expanduser('~')
# TRIP DATA PATHS
if OS == "posix":
# for the unix root, you should create a symbolic link to the data
# folder by ln -s /media/sf_R_DRIVE/.../ ~/dev/data/
# you can alternatively copy the data there
PATHS.data = PATHS.home + '/dev/data/'
    if not os.path.exists(PATHS.data):
        warnings.warn('# Please create a symbolic link to data in '
                      'directory ' + PATHS.data + '!')
        PATHS.data = \
            '/media/sf_R_DRIVE/dev/data/'
else:
raise Exception('# NOT IMPLEMENTED FOR WINDOWS, MODIFY THE SCRIPT !')
PATHS.data = '???'
PATHS.trips = PATHS.data + 'trip_data/'
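# For example, on a posix box with HOME=/home/juju these resolve to:
#   PATHS.home  -> '/home/juju'
#   PATHS.data  -> '/home/juju/dev/data/'   (when the symlink exists)
#   PATHS.trips -> '/home/juju/dev/data/trip_data/'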
|
the-stack_106_19497
|
"""
Monkeypatches for the Content class in Pelican, which has some assumptions that
it is working with HTML.
"""
import os
import re
import logging
from html import unescape
from urllib.parse import urlparse, urlunparse, urljoin, unquote
from pelican.urlwrappers import Author, Category, Tag
logger = logging.getLogger(__name__)
def _get_intrasite_link_regex(self):
intrasite_link_regex = self.settings['INTRASITE_LINK_REGEX']
regex = r"(?P<markup>=> )(?P<quote>)(?P<path>{}(?P<value>[\S]*))".format(intrasite_link_regex)
return re.compile(regex)
# Wrapper around urljoin so that gemini protocol base won't be rejected
def _urljoin(base, url, *args, **kwargs):
is_gemini = base.startswith('gemini://')
if is_gemini:
base = base.replace('gemini://', 'https://')
result = urljoin(base, url, *args, **kwargs)
if is_gemini:
result = result.replace('https://', 'gemini://')
return result
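# Illustrative example (not part of the original module):
#   _urljoin('gemini://example.org/blog/', 'post.gmi')
# returns 'gemini://example.org/blog/post.gmi', whereas plain urljoin() does
# not resolve relative paths against the unrecognised 'gemini' scheme.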
def _link_replacer(self, siteurl, m):
what = m.group('what')
value = urlparse(m.group('value'))
path = value.path
origin = m.group('path')
# urllib.parse.urljoin() produces `a.html` for urljoin("..", "a.html")
# so if RELATIVE_URLS are enabled, we fall back to os.path.join() to
# properly get `../a.html`. However, os.path.join() produces
# `baz/http://foo/bar.html` for join("baz", "http://foo/bar.html")
# instead of correct "http://foo/bar.html", so one has to pick a side
# as there is no silver bullet.
if self.settings['RELATIVE_URLS']:
joiner = os.path.join
else:
joiner = _urljoin
# However, it's not *that* simple: urljoin("blog", "index.html")
# produces just `index.html` instead of `blog/index.html` (unlike
# os.path.join()), so in order to get a correct answer one needs to
# append a trailing slash to siteurl in that case. This also makes
# the new behavior fully compatible with Pelican 3.7.1.
if not siteurl.endswith('/'):
siteurl += '/'
# XXX Put this in a different location.
if what in {'filename', 'static', 'attach'}:
def _get_linked_content(key, url):
nonlocal value
def _find_path(path):
if path.startswith('/'):
path = path[1:]
else:
# relative to the source path of this content
path = self.get_relative_source_path(
os.path.join(self.relative_dir, path)
)
return self._context[key].get(path, None)
# try path
result = _find_path(url.path)
if result is not None:
return result
# try unquoted path
result = _find_path(unquote(url.path))
if result is not None:
return result
# try html unescaped url
unescaped_url = urlparse(unescape(url.geturl()))
result = _find_path(unescaped_url.path)
if result is not None:
value = unescaped_url
return result
# check if a static file is linked with {filename}
if what == 'filename' and key == 'generated_content':
linked_content = _get_linked_content('static_content', value)
if linked_content:
logger.warning(
'{filename} used for linking to static'
' content %s in %s. Use {static} instead',
value.path,
self.get_relative_source_path())
return linked_content
return None
if what == 'filename':
key = 'generated_content'
else:
key = 'static_content'
linked_content = _get_linked_content(key, value)
if linked_content:
if what == 'attach':
linked_content.attach_to(self)
origin = joiner(siteurl, linked_content.url)
origin = origin.replace('\\', '/') # for Windows paths.
else:
logger.warning(
"Unable to find '%s', skipping url replacement.",
value.geturl(), extra={
'limit_msg': ("Other resources were not found "
"and their urls not replaced")})
elif what == 'category':
origin = joiner(siteurl, Category(path, self.settings).url)
elif what == 'tag':
origin = joiner(siteurl, Tag(path, self.settings).url)
elif what == 'index':
origin = joiner(siteurl, self.settings['INDEX_SAVE_AS'])
elif what == 'author':
origin = joiner(siteurl, Author(path, self.settings).url)
else:
logger.warning(
"Replacement Indicator '%s' not recognized, "
"skipping replacement",
what)
# keep all other parts, such as query, fragment, etc.
parts = list(value)
parts[2] = origin
origin = urlunparse(parts)
return ''.join((m.group('markup'), m.group('quote'), origin,
m.group('quote')))
# Could maybe use the content_object_init signal to perform this patching
# Not sure there is any advantage though
def _patch_content():
from pelican.contents import Content
# This method is tied intimately to html. I don't see any legit mechanism
# to change its behaviour in a plugin, so we have to monkeypatch it.
Content._get_intrasite_link_regex = _get_intrasite_link_regex
# This method has a problem in how it joins URLs - urllib doesn't know
# about gemini, and just ignores the siteurl
Content._link_replacer = _link_replacer
|
the-stack_106_19504
|
from typing import Tuple, List, Optional
def swap_sum(arr_a: List[int], arr_b: List[int]) -> Optional[Tuple]:
sum_a = sum(arr_a)
sum_b = sum(arr_b)
# sum_a - a + b = sum_b - b + a
# a - b = (sum_a - sum_b) / 2
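    # e.g. arr_a = [4, 1, 2, 1, 1, 2] (sum 11) and arr_b = [3, 6, 3, 3] (sum 15):
    # target = (11 - 15) / 2 = -2, so swapping (4, 6) or (1, 3) equalises both sums at 13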
diff = sum_a - sum_b
if diff % 2 != 0:
return None
target = int(diff / 2)
for i in arr_a:
for j in arr_b:
if i - j == target:
return i, j
return None
def __check_swap(swap: Tuple, arr_a: List[int], arr_b: List[int]):
assert sum(arr_a) - swap[0] + swap[1] == sum(arr_b) - swap[1] + swap[0]
if __name__ == '__main__':
a = [4, 1, 2, 1, 1, 2]
b = [3, 6, 3, 3]
__check_swap(swap_sum(a, b), a, b)
|
the-stack_106_19505
|
"""Implementation of SPNEGO wsgi middleware."""
import functools
import logging
import re
import base64
import gssapi
from werkzeug import local
from werkzeug.wrappers import BaseRequest, BaseResponse
_LOGGER = logging.getLogger(__name__)
_AUTH_ERROR = functools.partial(BaseResponse, status=500)
_FORBIDDEN = functools.partial(BaseResponse, status=403)
_UNAUTHORIZED = functools.partial(BaseResponse, status=401)
def wrap(wsgi_app, protect):
"""Wrap FLASK app in webauthd middleware."""
_LOGGER.info('Loading spnego auth.')
unprotected_paths = ['/']
unprotected_methods = ['OPTIONS']
protected_paths = []
if protect:
protected_paths.extend(protect)
local_manager = local.LocalManager([SpnegoAuth.LOCALS])
app = SpnegoAuth(
wsgi_app,
protected_paths=protected_paths,
unprotected_paths=unprotected_paths,
unprotected_methods=unprotected_methods,
unprotected_is_regex=False
)
return local_manager.make_middleware(app)
class SpnegoAuth:
"""SPNEGO authentication implementation."""
LOCALS = local.Local()
def __init__(self, wrapped,
protected_paths=(), unprotected_paths=(),
unprotected_methods=(),
unprotected_is_regex=False):
"""
:param protected_paths:
Provide a list of path for which this module should enforce auth
(e.g. '/login')
:param wad_cnx_settings:
Tuple connection settings the WebAuthD daemon (e.g. ('inet',
[port]) or ('unix', [path]))
:param unprotected_is_regex:
Whether unprotected_paths parameter contains regexes (default:
False)
"""
self._wrapped = wrapped
        # build a re like this '^(/path1(/.*)?|/path2(/.*)?|/path3(/.*)?)$'
        # Note that an empty protected_paths matches *EVERY* path
self.protected_paths_re = re.compile(
r'^(:?' +
r'(:?/.*)?|'.join(protected_paths) +
r'(:?/.*)?)$'
)
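        # e.g. protected_paths=['/api', '/admin'] yields the pattern
        # r'^(:?/api(:?/.*)?|/admin(:?/.*)?)$'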
if unprotected_paths:
if not unprotected_is_regex:
unprotected_paths = [
path + '(:?/.*)?' for path in unprotected_paths
]
self.unprotected_paths_re = re.compile(
r'^(:?' +
r'|'.join(unprotected_paths) +
r')$'
)
else:
self.unprotected_paths_re = None
self.unprotected_methods = unprotected_methods
        # Log the protection configuration for information
_LOGGER.info('Protecting paths: %r',
self.protected_paths_re.pattern)
_LOGGER.info('Unprotecting paths: %r',
(self.unprotected_paths_re.pattern
if self.unprotected_paths_re is not None
else None))
_LOGGER.info('Unprotecting methods: %r', self.unprotected_methods)
def _wrapped_authenticated(self, auth_user, auth_token=None):
def _wrapped(environ, start_response):
environ['REMOTE_USER'] = auth_user
            # TODO: when is auth token ever None?
if auth_token:
def spnego_start_response(status, headers, exc_info=None):
"""Initial spnego response."""
headers.append(
('WWW-Authenticate', 'Negotiate %s' % auth_token)
)
return start_response(status, headers, exc_info)
else:
spnego_start_response = start_response
return self._wrapped(environ, spnego_start_response)
return _wrapped
def _auth_spnego(self, request):
"""Perform SPNEGO authentication.
"""
if 'Authorization' not in request.headers:
            # Send the SPNEGO Negotiate challenge
_LOGGER.debug("Sending SPNEGO Negotiate request")
resp = BaseResponse(status=401)
resp.headers['WWW-Authenticate'] = 'Negotiate'
return resp
# We have authorization headers
auth_type, auth_chal = request.headers['Authorization'].split(' ', 3)
if auth_type != 'Negotiate':
return _UNAUTHORIZED('Invalid authorization header.')
_LOGGER.debug("Received SPNEGO Negociate token: %r", auth_chal)
try:
if not hasattr(self.LOCALS, 'ctx'):
# pylint: disable=assigning-non-slot
self.LOCALS.ctx = gssapi.SecurityContext(
creds=None, usage='accept'
)
_LOGGER.debug('Init security context.')
            in_token = base64.standard_b64decode(auth_chal)
            out_token = self.LOCALS.ctx.step(in_token)
            auth_token = (base64.b64encode(out_token).decode('ascii')
                          if out_token else None)
if not self.LOCALS.ctx.complete:
_LOGGER.debug("Sending SPNEGO Negotiate (continue).")
resp = BaseResponse(status=401)
resp.headers['WWW-Authenticate'] = 'Negotiate %s' % auth_token
return resp
# GSSAPI negotiation completed.
auth_user = str(self.LOCALS.ctx.initiator_name)
_LOGGER.info('Authenticated user: %s', auth_user)
return self._wrapped_authenticated(auth_user, auth_token)
# pylint: disable=c-extension-no-member
except gssapi.raw.misc.GSSError as err:
_LOGGER.warning('Unhandled exception: %s', str(err))
return _UNAUTHORIZED(str(err))
def wsgi_app(self, environ, start_response):
"""WSGI middleware main entry point.
"""
request = BaseRequest(environ)
if request.method in self.unprotected_methods:
return self._wrapped(environ, start_response)
if not self.protected_paths_re.match(request.path):
return self._wrapped(environ, start_response)
if (self.unprotected_paths_re is not None and
self.unprotected_paths_re.match(request.path)):
return self._wrapped(environ, start_response)
_LOGGER.info('Authenticating access to %s', request.path)
app = self._auth_spnego(request)
return app(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
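# Minimal wiring sketch (illustrative, not part of the original module); `app`
# is assumed to be an existing Flask application:
#
#   app.wsgi_app = wrap(app.wsgi_app, protect=['/api'])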
|
the-stack_106_19507
|
# Copyright (C) 2018 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.api_phonopy import Phonopy
from phonopy.interface.phonopy_yaml import PhonopyYaml
from phonopy.interface.calculator import get_default_physical_units
import phonopy.cui.load_helper as load_helper
from phonopy.structure.cells import get_primitive_matrix
def load(phonopy_yaml=None, # phonopy.yaml-like must be the first argument.
supercell_matrix=None,
primitive_matrix=None,
is_nac=True,
calculator=None,
unitcell=None,
supercell=None,
nac_params=None,
unitcell_filename=None,
supercell_filename=None,
born_filename=None,
force_sets_filename=None,
force_constants_filename=None,
fc_calculator=None,
fc_calculator_options=None,
factor=None,
frequency_scale_factor=None,
produce_fc=True,
is_symmetry=True,
symmetrize_fc=True,
is_compact_fc=True,
symprec=1e-5,
log_level=0):
"""Create Phonopy instance from parameters and/or input files.
"phonopy_yaml"-like file is parsed unless crystal structure information
is given by unitcell_filename, supercell_filename, unitcell
(PhonopyAtoms-like), or supercell (PhonopyAtoms-like).
Even when "phonopy_yaml"-like file is parse, parameters except for
crystal structure can be overwritten.
Phonopy default files of 'FORCE_SETS' and 'BORN' are parsed when they
are found in current directory and those data are not yet provided by
other means.
Crystal structure
-----------------
Means to provide crystal structure(s) and their priority:
1. unitcell_filename (with supercell_matrix)
2. supercell_filename
3. unitcell (with supercell_matrix)
4. supercell.
5. phonopy_yaml
Force sets or force constants
-----------------------------
Optional. Means to provide information to generate force constants
and their priority:
1. force_constants_filename
2. force_sets_filename
    3. phonopy_yaml if force constants are found in phonopy_yaml.
    4. phonopy_yaml if forces are found in phonopy_yaml.dataset.
5. 'FORCE_CONSTANTS' is searched in current directory.
6. 'force_constants.hdf5' is searched in current directory.
7. 'FORCE_SETS' is searched in current directory.
When both of 3 and 4 are satisfied but not others, force constants and
dataset are stored in Phonopy instance, but force constants are not
produced from dataset.
    Parameters for non-analytical term correction (NAC)
----------------------------------------------------
Optional. Means to provide NAC parameters and their priority:
1. born_filename
2. nac_params
3. phonopy_yaml.nac_params if existed and is_nac=True.
4. 'BORN' is searched in current directory when is_nac=True.
Parameters
----------
phonopy_yaml : str, optional
Filename of "phonopy.yaml"-like file. If this is given, the data
in the file are parsed. Default is None.
supercell_matrix : array_like, optional
Supercell matrix multiplied to input cell basis vectors.
shape=(3, ) or (3, 3), where the former is considered a diagonal
matrix. Default is the unit matrix.
dtype=int
primitive_matrix : array_like or str, optional
Primitive matrix multiplied to input cell basis vectors. Default is
None, which is equivalent to 'auto'.
For array_like, shape=(3, 3), dtype=float.
When 'F', 'I', 'A', 'C', or 'R' is given instead of a 3x3 matrix,
the primitive matrix for the character found at
https://spglib.github.io/spglib/definition.html
is used.
is_nac : bool, optional
        If True, look for 'BORN' file. If False, NAC is turned off.
Default is True.
calculator : str, optional.
Calculator used for computing forces. This is used to switch the set
of physical units. Default is None, which is equivalent to "vasp".
unitcell : PhonopyAtoms, optional
Input unit cell. Default is None.
supercell : PhonopyAtoms, optional
        Input supercell. When given, the default value of primitive_matrix is set
        to 'auto' (can be overwritten). supercell_matrix is ignored. Default is
None.
nac_params : dict, optional
Parameters required for non-analytical term correction. Default is
None.
{'born': Born effective charges
(array_like, shape=(primitive cell atoms, 3, 3), dtype=float),
'dielectric': Dielectric constant matrix
(array_like, shape=(3, 3), dtype=float),
         'factor': unit conversion factor (float)}
unitcell_filename : str, optional
Input unit cell filename. Default is None.
supercell_filename : str, optional
Input supercell filename. When this is specified, supercell_matrix is
ignored. Default is None.
born_filename : str, optional
Filename corresponding to 'BORN', a file contains non-analytical term
correction parameters.
force_sets_filename : str, optional
Filename of a file corresponding to 'FORCE_SETS', a file contains sets
of forces and displacements. Default is None.
force_constants_filename : str, optional
Filename of a file corresponding to 'FORCE_CONSTANTS' or
'force_constants.hdf5', a file contains force constants. Default is
None.
fc_calculator : str, optional
Force constants calculator. Currently only 'alm'. Default is None.
fc_calculator_options : str, optional
Optional parameters that are passed to the external fc-calculator.
This is given as one text string. How to parse this depends on the
        fc-calculator. For alm, each parameter is split by a comma ',',
and each set of key and value pair is written in 'key = value'.
factor : float, optional
Phonon frequency unit conversion factor. Unless specified, default
unit conversion factor for each calculator is used.
frequency_scale_factor : float, optional
Factor multiplied to calculated phonon frequency. Default is None,
i.e., effectively 1.
produce_fc : bool, optional
Setting False, force constants are not calculated from displacements
and forces. Default is True.
is_symmetry : bool, optional
Setting False, crystal symmetry except for lattice translation is not
considered. Default is True.
symmetrize_fc : bool, optional
Setting False, force constants are not symmetrized when creating
force constants from displacements and forces. Default is True.
is_compact_fc : bool
Force constants are produced in the array whose shape is
        True: (primitive, supercell, 3, 3)
        False: (supercell, supercell, 3, 3)
where 'supercell' and 'primitive' indicate number of atoms in these
cells. Default is True.
symprec : float, optional
Tolerance used to find crystal symmetry. Default is 1e-5.
log_level : int, optional
Verbosity control. Default is 0.
"""
if (supercell is not None or
supercell_filename is not None or
unitcell is not None or
unitcell_filename is not None):
cell, smat, pmat = load_helper.get_cell_settings(
supercell_matrix=supercell_matrix,
primitive_matrix=primitive_matrix,
unitcell=unitcell,
supercell=supercell,
unitcell_filename=unitcell_filename,
supercell_filename=supercell_filename,
calculator=calculator,
symprec=symprec,
log_level=log_level)
_calculator = calculator
_nac_params = nac_params
_dataset = None
_fc = None
elif phonopy_yaml is not None:
phpy_yaml = PhonopyYaml()
phpy_yaml.read(phonopy_yaml)
cell = phpy_yaml.unitcell
smat = phpy_yaml.supercell_matrix
if smat is None:
smat = np.eye(3, dtype='intc', order='C')
if primitive_matrix is not None:
pmat = get_primitive_matrix(primitive_matrix, symprec=symprec)
else:
pmat = phpy_yaml.primitive_matrix
if nac_params is not None:
_nac_params = nac_params
elif is_nac:
_nac_params = phpy_yaml.nac_params
else:
_nac_params = None
_dataset = phpy_yaml.dataset
_fc = phpy_yaml.force_constants
if calculator is None:
_calculator = phpy_yaml.calculator
else:
_calculator = calculator
else:
msg = ("Cell information could not found. "
"Phonopy instance loading failed.")
raise RuntimeError(msg)
if log_level and _calculator is not None:
print("Set \"%s\" mode." % _calculator)
# units keywords: factor, nac_factor, distance_to_A
units = get_default_physical_units(_calculator)
if factor is None:
_factor = units['factor']
else:
_factor = factor
phonon = Phonopy(cell,
smat,
primitive_matrix=pmat,
factor=_factor,
frequency_scale_factor=frequency_scale_factor,
symprec=symprec,
is_symmetry=is_symmetry,
calculator=_calculator,
log_level=log_level)
# NAC params
if born_filename is not None or _nac_params is not None or is_nac:
ret_nac_params = load_helper.get_nac_params(
primitive=phonon.primitive,
nac_params=_nac_params,
born_filename=born_filename,
is_nac=is_nac,
nac_factor=units['nac_factor'],
log_level=log_level)
if ret_nac_params is not None:
phonon.nac_params = ret_nac_params
# Displacements, forces, and force constants
load_helper.set_dataset_and_force_constants(
phonon,
_dataset,
_fc,
force_constants_filename=force_constants_filename,
force_sets_filename=force_sets_filename,
fc_calculator=fc_calculator,
fc_calculator_options=fc_calculator_options,
produce_fc=produce_fc,
symmetrize_fc=symmetrize_fc,
is_compact_fc=is_compact_fc,
log_level=log_level)
return phonon
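if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). The file names
    # below are placeholders for whatever structure/force files are at hand.
    ph = load(supercell_matrix=[2, 2, 2],
              primitive_matrix='auto',
              unitcell_filename="POSCAR",
              force_sets_filename="FORCE_SETS",
              log_level=1)
    print(ph.supercell)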
|
the-stack_106_19508
|
#!/usr/bin/env python
from mininet.cli import CLI
from mininet.node import Link, Host
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.term import makeTerm
from mininet.topo import Topo
from functools import partial
class VLANHost( Host ):
"Host connected to VLAN interface"
def config( self, vlan=10, **params ):
"""Configure VLANHost according to (optional) parameters:
vlan: VLAN ID for default interface"""
r = super( VLANHost, self ).config( **params )
intf = self.defaultIntf()
# remove IP from default, "physical" interface
self.cmd( 'ifconfig %s inet 0' % intf )
# create VLAN interface
self.cmd( 'vconfig add %s %d' % ( intf, vlan ) )
# assign the host's IP to the VLAN interface
self.cmd( 'ifconfig %s.%d inet %s' % ( intf, vlan, params['ip'] ) )
# update the intf name and host's intf map
newName = '%s.%d' % ( intf, vlan )
# update the (Mininet) interface to refer to VLAN interface name
intf.name = newName
# add VLAN interface to host's name to intf map
self.nameToIntf[ newName ] = intf
return r
class VplsTopo(Topo):
''' VPLS demo Topology '''
def __init__(self):
Topo.__init__(self)
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
s5 = self.addSwitch('s5')
s6 = self.addSwitch('s6')
h1 = self.addHost('h1', cls=VLANHost, vlan=100, mac='00:00:00:00:00:01', ip='10.0.0.1/24')
h2 = self.addHost('h2', cls=VLANHost, vlan=200, mac='00:00:00:00:00:02', ip='10.0.0.2/24')
h3 = self.addHost('h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24')
h4 = self.addHost('h4', cls=VLANHost, vlan=400, mac='00:00:00:00:00:04', ip='10.0.0.4/24')
h5 = self.addHost('h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24')
h6 = self.addHost('h6', mac='00:00:00:00:00:06', ip='10.0.0.6/24')
self.addLink(s1, h1, port1=1, port2=0)
self.addLink(s2, h2, port1=1, port2=0)
self.addLink(s3, h3, port1=1, port2=0)
self.addLink(s4, h4, port1=1, port2=0)
self.addLink(s5, h5, port1=1, port2=0)
self.addLink(s6, h6, port1=1, port2=0)
self.addLink(s1, s2)
self.addLink(s2, s3)
self.addLink(s3, s4)
self.addLink(s4, s1)
self.addLink(s4, s2)
self.addLink(s1, s5)
self.addLink(s4, s5)
self.addLink(s2, s6)
self.addLink(s3, s6)
topos = { 'vpls': ( lambda: VplsTopo() ) }
if __name__ == '__main__':
from onosnet import run
run(VplsTopo())
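# Alternatively, the `topos` entry above lets Mininet load this topology
# directly from the CLI (file name is a placeholder):
#   sudo mn --custom vpls.py --topo vpls --controller remote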
|
the-stack_106_19510
|
from skyfield.api import Topos, load
import math
import random
import logging
import sys
import os
from datetime import datetime, timedelta, timezone
logging.disable(logging.INFO)
#Challenge
#Grab data from local text file for speed and consistency
#-------------------------------------------#
satellites = load.tle('stations.txt')
#-------------------------------------------#
SEED = int(os.getenv("SEED", 0))
random.seed(SEED)
#Calculate random time for player
#-------------------------------------------#
ts = load.timescale()
random_hour = random.randint(1, 23)
random_minute = random.randint(0, 59)
random_second = random.randint(0, 59)
t = ts.utc(2020, 3, 18, random_hour, random_minute, random_second)
#Return dynamically generated time to player
#print(f'DEBUG: {t.utc}') # OLD. Bad, very bad.
print(f'Please use the following time to find the correct satellite:(2020, 3, 18, {random_hour}, {random_minute}, {random_second}.0)')
sys.stdout.flush()
#-------------------------------------------#
#The loaded TLE data is keyed by both catalog numbers (ints) and names (strings); we only need the satellite objects keyed by the name strings
#-------------------------------------------#
string_only_satellites = []
for satellite in satellites:
if isinstance(satellite, str):
string_only_satellites.append(satellites[satellite])
#-------------------------------------------#
sys.stderr.write("Number of Satellites: %d\n" % len(string_only_satellites))
#Generate random satellite coordinates for player
#-------------------------------------------#
while True:
random_satellite_number = random.randint(0, len(string_only_satellites) - 1)
player_satellite_for_input_solver = string_only_satellites[random_satellite_number]
string_only_satellites.pop(random_satellite_number)
#Create first topocentric for comparison, testing to ensure satellites are not in a collision path
bluffton = Topos('36.17237297 N','-115.13431754 W')
difference = player_satellite_for_input_solver - bluffton
    #The result will be the position of the satellite relative to you as an observer (bluffton variable).
topocentric = difference.at(t)
restart = False
for satellite in string_only_satellites:
bluffton = Topos('36.17237297 N','-115.13431754 W')
difference = satellite - bluffton
        #The result will be the position of the satellite relative to you as an observer (bluffton variable).
topocentric2 = difference.at(t)
difference_km = (topocentric2 - topocentric).distance().km
if difference_km < 10.0:
#Another satellite is within 10km
restart = True
sys.stderr.write("Satellite too close: %d\n" % difference_km)
break
if restart:
continue
sys.stderr.write("Using Satellite: %s\n" % player_satellite_for_input_solver)
sys.stderr.flush()
geocentric = player_satellite_for_input_solver.at(t)
coordinates = geocentric.position.km.tolist()
#Return dynamically generated geocentric coordinates to player
print(f'Please use the following Earth Centered Inertial reference frame coordinates to find the satellite:{coordinates}')
sys.stdout.flush()
break
#-------------------------------------------#
#Player Input Solver
#-------------------------------------------#
random_times = []
r_times_pretty = []
amount_of_times = 0
random.seed(datetime.now())
while True:
if amount_of_times < 3:
amount_of_times = amount_of_times + 1
ts = load.timescale()
random_hour = random.randint(1, 23)
random_minute = random.randint(0, 59)
random_second = random.randint(0, 59)
random_times.append(ts.utc(2020, 3, 18, random_hour, random_minute, random_second))
r_times_pretty.append(f'(2020, 3, 18, {random_hour}, {random_minute}, {random_second}.0)')
else:
break
#Upped it to 5 to give the users a couple of extra attempts in case they accidentally fat-fingered their input or something
times_allowed = 0
while True:
try:
if times_allowed < 5:
print(f'Current attempt:{times_allowed + 1}')
sys.stdout.flush()
times_allowed = times_allowed + 1
geocentric = player_satellite_for_input_solver.at(random_times[0])
coordinates = geocentric.position.km.tolist()
print(f"What is the X coordinate at the time of:{r_times_pretty[0]}?")
sys.stdout.flush()
line = sys.stdin.readline().strip()
user_input_coordinate = float(line)
if isinstance(user_input_coordinate, float):
if math.isclose(user_input_coordinate, coordinates[0], rel_tol=1e-2):
                    print(f'The X coordinate for {r_times_pretty[0]} is correct!')
                    sys.stdout.flush()
else:
print(f"DEBUG: {coordinates[0]}")
                    print(f'{user_input_coordinate} is incorrect, please try again and enter the X coordinate for the satellite at {r_times_pretty[0]}.')
sys.stdout.flush()
continue
else:
print('Your input must be a floating point coordinate (J2000) in KM')
sys.stdout.flush()
continue
print(f"What is the Y coordinate at the time of:{r_times_pretty[0]}?")
sys.stdout.flush()
line = sys.stdin.readline().strip()
user_input_coordinate = float(line)
if isinstance(user_input_coordinate, float):
if math.isclose(user_input_coordinate, coordinates[1], rel_tol=1e-2):
print(f'The Y coordinate for {r_times_pretty[0]} is correct!')
sys.stdout.flush()
else:
                    print(f'{user_input_coordinate} is incorrect, please try again and enter the Y coordinate for the satellite at {r_times_pretty[0]}.')
sys.stdout.flush()
continue
else:
print('Your input must be a floating point coordinate (J2000) in KM')
sys.stdout.flush()
continue
print(f"What is the Z coordinate at the time of:{r_times_pretty[0]}?")
sys.stdout.flush()
line = sys.stdin.readline().strip()
user_input_coordinate = float(line)
if isinstance(user_input_coordinate, float):
if math.isclose(user_input_coordinate, coordinates[2], rel_tol=1e-1):
print(f'The Z axis coordinate for {r_times_pretty[0]} is correct!')
sys.stdout.flush()
random_times.pop(0)
r_times_pretty.pop(0)
#Check to see if there are any more times in the list to be solved.
if random_times:
continue
else:
flag = os.getenv("FLAG", "FLAG{Placeholder}")
if flag:
print(flag)
sys.stdout.flush()
else:
print("This is to be removed but for final testing - !!!!FLAG_PLACEHOLDER!!!!")
sys.stdout.flush()
break
else:
                    print(f'{user_input_coordinate} is incorrect, please try again and enter the Z coordinate for the satellite at {r_times_pretty[0]}.')
sys.stdout.flush()
continue
else:
print('Your input must be a floating point coordinate (J2000) in KM')
sys.stdout.flush()
continue
else:
break
    except:
        sys.exc_info()
if times_allowed == 5:
print('Please try again!')
sys.stdout.flush()
else:
print(f'Please enter the proper coordinate for the satellite at {r_times_pretty[0]}.')
sys.stdout.flush()
#-------------------------------------------#
|
the-stack_106_19512
|
import sys
from datetime import datetime
from threading import RLock
from .CacheStorage import CachedItem, ItemNotCached
from .MemoryStorage import MemoryStorage
class ItemExpired(ItemNotCached): pass
class LRUCache:
    '''
    Collection of data from which items may be removed to make room.
    Each piece of data is indexed by a unique key.
    Least Recently Used implies that when room is needed in the
    collection, whatever key has been accessed least recently
    is silently removed from the collection.
    Actual storage of the data depends on the storage object
    attached, and defaults to in-memory (MemoryStorage)
    '''
def __init__(self, storage=None, max_size=None, sizeof=None, max_age=None):
'''
:param storage: Storage for data (CacheStorage)
:param max_size: Maximum size to store in cache
:param sizeof: Function to use for calculating the size of data cached
:param max_age: Max time to hold cached items for (timedelta)
'''
self.storage = storage or MemoryStorage()
self.max_size = max_size
self.__sizeof = sizeof
self.max_age = max_age
self.lock = RLock()
def put(self, key, data, expires_in=None, size=None):
'''
Add an object to the cache
:param key: Key to use to retrieve this item.
:param data: The actual item to cache.
:param expires_in: timedelta to specify when object should expire
:param size: Size of the entry if known (will skip sizeof calc)
:return:
'''
# Determine size of data
if size is None:
if self.__sizeof is not None:
size = self.__sizeof(data)
else:
size = sys.getsizeof(key) + sys.getsizeof(data)
# Time to expire
if expires_in is not None:
expire_after = datetime.now() + expires_in
elif self.max_age is not None:
expire_after = datetime.now() + self.max_age
else:
expire_after = None
item = CachedItem(data, size=size, expires=expire_after)
# Manipulate storage
with self.lock:
# Remove item if already exists
if self.storage.has_key(key):
self._remove_item_from_storage(key)
# Sanity check: Data too big for storage
if self.max_size is not None and size > self.max_size:
return
# Make sure there is space
if self.max_size is not None:
self.make_room_for(size)
# Save item
self.storage.add(key, item)
def get(self, key):
return self[key]
def __getitem__(self, key):
'''Get data from cache'''
with self.lock:
item = self.storage.get(key)
if item.expires_at is not None and item.expires_at < datetime.now():
self.remove(key)
raise ItemExpired()
self.storage.touch_last_used(key)
return item.data
def __setitem__(self, key, data):
'''Add item to the cache'''
self.put(key, data)
def keys(self):
with self.lock:
return self.storage.keys()
def items(self):
with self.lock:
for key, item in self.storage.items():
yield key, item.data
def _remove_item_from_storage(self, key):
'''
Remove an item from storage
Intended for internal use. No state checking
'''
with self.lock:
self.storage.remove(key)
def __delitem__(self, key):
self._remove_item_from_storage(key)
def remove(self, key):
self._remove_item_from_storage(key)
@property
def num_items(self):
return self.storage.num_items
def close(self):
with self.lock:
self.storage.close()
self.storage = None
def clean_expired(self):
'''Clean old entries out of cache'''
with self.lock:
for key, item in self.storage.expired_items():
self.remove(key)
@property
def total_size_stored(self):
return self.storage.total_size_stored
def make_room_for(self, size):
'''
Make room for a new item of the given size
Note: Possible race condition if storage supports multiple LRUCache objects
in separate processes and called concurrently. Solve this in storage
engine implementation if needed.
        :param size: Size of the new object coming in (the size limit itself
                     is taken from self.max_size)
'''
with self.lock:
if self.max_size > 0 and size > 0:
while self.storage.total_size_stored + size > self.max_size:
self.storage.pop_oldest()
|
the-stack_106_19513
|
# encoding=utf-8
import numpy as np
import math
import sys
import os
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'utils'))
# add for shape-preserving Loss
from lib.pointops.functions import pointops
from datasets_4point import PartDataset,ModelNetDataset
from collections import namedtuple
# from pointnet2.pointnet2_modules import PointNet2SAModule, PointNet2SAModuleMSG
from utils import chamfer_loss
cudnn.benchmark = True
class PDGN_v1(object):
def __init__(self, args):
self.model_name = args.network
self.workers = args.workers
self.checkpoint_dir = args.checkpoint_dir
self.model_dir = args.model_dir
self.data_root = args.data_root
self.pretrain_model_G = args.pretrain_model_G
self.pretrain_model_D = args.pretrain_model_D
# softmax for bilaterl interpolation
if args.softmax == 'True':
self.softmax = True
print('use softmax')
else:
self.softmax = False
print('do not use softmax')
self.epoch = args.max_epoch # 300
self.batch_size = args.batch_size # 50
self.noise_dim = args.noise_dim # 128
self.learning_rate = args.learning_rate # 0.0001
self.num_point = args.num_point # 2048
self.num_k = args.num_k # 20
self.choice = args.choice # which class
self.snapshot = args.snapshot # 20 epochs / one
self.savename = args.savename
if self.choice is None:
self.category = 'full'
else:
self.category = self.choice
self.chamfer_loss = chamfer_loss.ChamferLoss()
if args.dataset == 'shapenet':
print('-------------use dataset shapenet-------------')
self.dataset = PartDataset(root=self.data_root, batch_size=self.batch_size, class_choice=self.choice, classification=True)
self.test_dataset = PartDataset(root=self.data_root, batch_size=self.batch_size, class_choice=self.choice, classification=True, train=False)
elif args.dataset == 'modelnet10':
print('-------------use dataset modelnet10-------------')
self.dataset = ModelNetDataset(root=self.data_root, batch_size=self.batch_size, npoints=self.num_point,
split='train', normalize=True, normal_channel=False, modelnet10=True,class_choice=self.choice)
self.test_dataset = ModelNetDataset(root=self.data_root, batch_size=self.batch_size, npoints=self.num_point,
split='test', normalize=True, normal_channel=False, modelnet10=True,class_choice=self.choice)
elif args.dataset == 'modelnet40':
print('-------------use dataset modelnet40-------------')
self.dataset = ModelNetDataset(root=self.data_root, batch_size=self.batch_size, npoints=self.num_point,
split='train', normalize=True, normal_channel=False, modelnet10=False,class_choice=self.choice)
self.test_dataset = ModelNetDataset(root=self.data_root, batch_size=self.batch_size, npoints=self.num_point,
split='test', normalize=True, normal_channel=False, modelnet10=False,class_choice=self.choice)
self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, shuffle=True, num_workers=int(self.workers))
self.num_batches = len(self.dataset) // self.batch_size
self.test_dataloader = torch.utils.data.DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=True, num_workers=int(self.workers))
self.test_num_batches = len(self.test_dataset) // self.batch_size
if args.phase == 'train':
print('training...')
self.log_info = args.log_info
self.LOG_FOUT = open(os.path.join(self.checkpoint_dir, self.model_dir, self.log_info), 'w')
self.LOG_FOUT.write(str(args)+'\n')
elif args.phase == 'test':
print('testing...')
elif args.phase == 'cls':
print('extract feature')
cudnn.benchmark = True # cudnn
def build_model(self):
""" Models """
self.generator = PointGenerator(self.num_point, self.num_k, self.softmax)
self.discriminator1 = PointDiscriminator_1()
self.discriminator2 = PointDiscriminator_2()
self.discriminator3 = PointDiscriminator_3()
self.discriminator4 = PointDiscriminator_4()
self.generator = nn.DataParallel(self.generator)
self.discriminator1 = nn.DataParallel(self.discriminator1)
self.discriminator2 = nn.DataParallel(self.discriminator2)
self.discriminator3 = nn.DataParallel(self.discriminator3)
self.discriminator4 = nn.DataParallel(self.discriminator4)
self.generator.cuda()
self.discriminator1.cuda()
self.discriminator2.cuda()
self.discriminator3.cuda()
self.discriminator4.cuda()
""" Loss Function """
#self.group = pointops.Gen_QueryAndGroupXYZ(radius=0.1, nsample=10, use_xyz=False)
self.group = pointops.Gen_QueryAndGroupXYZ(radius=None, nsample=20, use_xyz=False)
self.loss_fn = nn.MSELoss()
self.shape_loss_fn = nn.MSELoss()
""" Training """
self.optimizerG = optim.Adam(self.generator.parameters(), lr=self.learning_rate, betas=(0.5, 0.999))
self.optimizerD1 = optim.Adam(self.discriminator1.parameters(), lr=self.learning_rate, betas=(0.5, 0.999))
self.optimizerD2 = optim.Adam(self.discriminator2.parameters(), lr=self.learning_rate, betas=(0.5, 0.999))
self.optimizerD3 = optim.Adam(self.discriminator3.parameters(), lr=self.learning_rate, betas=(0.5, 0.999))
self.optimizerD4 = optim.Adam(self.discriminator4.parameters(), lr=self.learning_rate, betas=(0.5, 0.999))
def compute_mean_covariance(self, points):
bs, ch, nump = points.size()
# ----------------------------------------------------------------
mu = points.mean(dim=-1, keepdim=True) # Bx3xN -> Bx3x1
# ----------------------------------------------------------------
tmp = points - mu.repeat(1, 1, nump) # Bx3xN - Bx3xN -> Bx3xN
tmp_transpose = tmp.transpose(1, 2) # Bx3xN -> BxNx3
covariance = torch.bmm(tmp, tmp_transpose)
covariance = covariance / nump
return mu, covariance # Bx3x1 Bx3x3
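# Usage sketch (illustrative only; shapes follow the comments above):
#   pts = torch.rand(4, 3, 256)                  # B=4 clouds, 3 coords, 256 points
#   mu, cov = self.compute_mean_covariance(pts)  # mu: 4x3x1, cov: 4x3x3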
def get_local_pair(self, pt1, pt2):
pt1_batch,pt1_N,pt1_M = pt1.size()
pt2_batch,pt2_N,pt2_M = pt2.size()
# pt1: Bx3xM pt2: Bx3XN (N > M)
#print('pt1: {} pt2: {}'.format(pt1.size(), pt2.size()))
new_xyz = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt1_trans = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt2_trans = pt2.transpose(1, 2).contiguous() # Bx3xN -> BxNx3
g_xyz1 = self.group(pt1_trans, new_xyz) # Bx3xMxK
#print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = self.group(pt2_trans, new_xyz) # Bx3xMxK
#print('g_xyz2: {}'.format(g_xyz2.size()))
g_xyz1 = g_xyz1.transpose(1, 2).contiguous().view(-1, 3, 20) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
#print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = g_xyz2.transpose(1, 2).contiguous().view(-1, 3, 20) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
#print('g_xyz2: {}'.format(g_xyz2.size()))
# print('====================== FPS ========================')
# print(pt1.shape,g_xyz1.shape)
# print(pt2.shape,g_xyz2.shape)
mu1, var1 = self.compute_mean_covariance(g_xyz1)
mu2, var2 = self.compute_mean_covariance(g_xyz2)
#print('mu1: {} var1: {}'.format(mu1.size(), var1.size()))
#print('mu2: {} var2: {}'.format(mu2.size(), var2.size()))
#--------------------------------------------------
# like_mu12 = self.shape_loss_fn(mu1, mu2)
# like_var12 = self.shape_loss_fn(var1, var2)
#----------------------------------------------------
#=========$$$ CD loss $$$===============
# print("p1,p2:",pt1.shape,pt2.shape)
# print("mu2:",mu1.shape,mu2.shape,pt1_batch,pt1_N,pt1_M)
mu1 = mu1.view(pt1_batch,-1,3)
mu2 = mu2.view(pt2_batch,-1,3)
var1 = var1.view(pt1_batch,-1,9)
var2 = var2.view(pt2_batch,-1,9)
like_mu12 = self.chamfer_loss(mu1,mu2) / float(pt1_M)
like_var12 = self.chamfer_loss(var1,var2) / float(pt1_M)
# import pdb
# pdb.set_trace()
#print('mu: {} var: {}'.format(like_mu12.item(), like_var12.item()))
return like_mu12, like_var12
def train(self):
# restore check-point if it exits
could_load, save_epoch = self.load(self.checkpoint_dir)
if could_load:
start_epoch = save_epoch
print(" [*] Load SUCCESS")
else:
start_epoch = 1
print(" [!] start epoch: {}".format(start_epoch))
# loop for epoch
start_time = time.time()
for epoch in range(start_epoch, self.epoch+1):
for idx, data in enumerate(self.dataloader, 0):
if idx+1 > self.num_batches: continue
# exit()
# ----------------train D-----------------------
points1, points2, points3, points4, _ = data
points1 = Variable(points1)
points2 = Variable(points2)
points3 = Variable(points3)
points4 = Variable(points4)
target = Variable(torch.from_numpy(np.ones(self.batch_size,).astype(np.int64))).cuda().float().reshape(self.batch_size, 1)
sim_noise = Variable(torch.Tensor(np.random.normal(0, 0.2, (self.batch_size, self.noise_dim)))).cuda()
fake1, fake2, fake3, fake4 = self.generator(sim_noise)
fake_target = Variable(torch.from_numpy(np.zeros(self.batch_size,).astype(np.int64))).cuda().float().reshape(self.batch_size, 1)
# ------------------D1---------------
self.optimizerD1.zero_grad()
# self.discriminator1.zero_grad()
points1 = points1.transpose(2, 1).cuda()
pred1 = self.discriminator1(points1)
pred1_fake = self.discriminator1(fake1.detach())
loss1_1 = self.loss_fn(pred1, target)
loss2_1 = self.loss_fn(pred1_fake, fake_target)
lossD1 = (loss1_1 + loss2_1) / 2.0
lossD1.backward()
self.optimizerD1.step()
# ------------------D2---------------
self.optimizerD2.zero_grad()
points2 = points2.transpose(2, 1).cuda()
pred2 = self.discriminator2(points2)
pred2_fake = self.discriminator2(fake2.detach())
loss1_2 = self.loss_fn(pred2, target)
loss2_2 = self.loss_fn(pred2_fake, fake_target)
lossD2 = (loss1_2 + loss2_2) / 2.0
lossD2.backward()
self.optimizerD2.step()
# ------------------D3---------------
self.optimizerD3.zero_grad()
points3 = points3.transpose(2, 1).cuda()
pred3 = self.discriminator3(points3)
pred3_fake = self.discriminator3(fake3.detach())
loss1_3 = self.loss_fn(pred3, target)
loss2_3 = self.loss_fn(pred3_fake, fake_target)
lossD3 = (loss1_3 + loss2_3) / 2.0
lossD3.backward()
self.optimizerD3.step()
# ------------------D4---------------
self.optimizerD4.zero_grad()
points4 = points4.transpose(2, 1).cuda()
pred4 = self.discriminator4(points4)
pred4_fake = self.discriminator4(fake4.detach())
loss1_4 = self.loss_fn(pred4, target)
loss2_4 = self.loss_fn(pred4_fake, fake_target)
lossD4 = (loss1_4 + loss2_4) / 2.0
lossD4.backward()
self.optimizerD4.step()
# -----------------------------------train G-----------------------------------
self.optimizerG.zero_grad()
sim_noise = Variable(torch.Tensor(np.random.normal(0, 0.2, (self.batch_size, self.noise_dim)))).cuda()
points1_gen, points2_gen, points3_gen, points4_gen = self.generator(sim_noise)
# p1=Bx3x256 p2=Bx3x512 p3=Bx3x1024 p4=Bx3x2048
#print('points1_gen: {}'.format(points1_gen.size()))
like_mu12, like_cov12 = self.get_local_pair(points1_gen, points2_gen)
like_mu13, like_cov13 = self.get_local_pair(points1_gen, points3_gen)
like_mu14, like_cov14 = self.get_local_pair(points1_gen, points4_gen)
like_mu23, like_cov23 = self.get_local_pair(points2_gen, points3_gen)
like_mu24, like_cov24 = self.get_local_pair(points2_gen, points4_gen)
like_mu34, like_cov34 = self.get_local_pair(points3_gen, points4_gen)
#exit()
#mu1, covariance1 = self.compute_mean_covariance(points1_gen)
#print('mu1: {} var1: {}'.format(mu1.size(), covariance1.size()))
#mu2, covariance2 = self.compute_mean_covariance(points2_gen)
#mu3, covariance3 = self.compute_mean_covariance(points3_gen)
#mu4, covariance4 = self.compute_mean_covariance(points4_gen)
#like_mu12 = self.shape_loss_fn(mu1, mu2)
#like_cov12 = self.shape_loss_fn(covariance1, covariance2)
#like_mu13 = self.shape_loss_fn(mu1, mu3)
#like_cov13 = self.shape_loss_fn(covariance1, covariance3)
#like_mu14 = self.shape_loss_fn(mu1, mu4)
#like_cov14 = self.shape_loss_fn(covariance1, covariance4)
#like_mu23 = self.shape_loss_fn(mu2, mu3)
#like_cov23 = self.shape_loss_fn(covariance2, covariance3)
#like_mu24 = self.shape_loss_fn(mu2, mu4)
#like_cov24 = self.shape_loss_fn(covariance2, covariance4)
#like_mu34 = self.shape_loss_fn(mu3, mu4)
#like_cov34 = self.shape_loss_fn(covariance3, covariance4)
pred_g1 = self.discriminator1(points1_gen)
pred_g2 = self.discriminator2(points2_gen)
pred_g3 = self.discriminator3(points3_gen)
pred_g4 = self.discriminator4(points4_gen)
target_g = Variable(torch.from_numpy(np.ones(self.batch_size, ).astype(np.int64))).cuda().float().reshape(self.batch_size, 1)
#print(pred_g, target)
g_loss_1 = self.loss_fn(pred_g1, target_g)
g_loss_2 = self.loss_fn(pred_g2, target_g)
g_loss_3 = self.loss_fn(pred_g3, target_g)
g_loss_4 = self.loss_fn(pred_g4, target_g)
w = 30.0
similar_loss = w * 1.0 * (like_mu12 + like_mu13 + like_mu14 + like_mu23 + like_mu24 + like_mu34) + \
w * 5.0 * (like_cov12 + like_cov13 + like_cov14 + like_cov23 + like_cov24 + like_cov34)
lossG = (1.2*g_loss_1 + 1.2*g_loss_2 + 1.2*g_loss_3 + g_loss_4) + 0.5*similar_loss
lossG.backward()
self.optimizerG.step()
# display training status
print("Epoch: [%2d] [%4d/%4d] time: %2dm %2ds d_loss1: %.8f d_loss2: %.8f d_loss3: %.8f d_loss4: %.8f, g_loss: %.8f, similar_loss: %.8f" \
% (epoch, idx+1, self.num_batches, (time.time()-start_time)/60,(time.time()-start_time)%60,
lossD1.item(), lossD2.item(), lossD3.item(), lossD4.item(), lossG.item(), similar_loss.item()))
self.log_string("Epoch: [%2d] [%4d/%4d] time: %2dm %2ds d_loss1: %.8f d_loss2: %.8f d_loss3: %.8f d_loss4: %.8f, g_loss: %.8f, similar_loss: %.8f" \
% (epoch, idx+1, self.num_batches, (time.time()-start_time)/60,(time.time()-start_time)%60,
lossD1.item(), lossD2.item(), lossD3.item(), lossD4.item(), lossG.item(), similar_loss.item()))
if epoch % self.snapshot == 0:
self.save(self.checkpoint_dir, epoch)
self.save(self.checkpoint_dir, self.epoch)
self.LOG_FOUT.close()
def test(self):
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
print(" [*] Load SUCCESS")
else :
print(" [!] Load failed...")
sim_noise = Variable(torch.Tensor(np.random.normal(0, 0.2, (self.batch_size, self.noise_dim)))).cuda()
gen_points1, gen_points2, gen_points3, gen_points4 = self.generator(sim_noise)
# print(gen_points.shape)
gen_points1 = gen_points1.transpose(2, 1).cpu().data.numpy() # Bx3x256 -> Bx256x3
print(gen_points1.shape)
gen_points2 = gen_points2.transpose(2, 1).cpu().data.numpy() # Bx3x512 -> Bx512x3
print(gen_points2.shape)
gen_points3 = gen_points3.transpose(2, 1).cpu().data.numpy() # Bx3x1024 -> Bx1024x3
print(gen_points3.shape)
gen_points4 = gen_points4.transpose(2, 1).cpu().data.numpy() # Bx3x2048 -> Bx2048x3
print(gen_points4.shape)
save_dir = os.path.join(self.checkpoint_dir, self.model_dir, self.savename)
# save_dir = ( '../latent_3d_points/generated_data')
np.save(save_dir+'_1', gen_points1)
np.save(save_dir+'_2', gen_points2)
np.save(save_dir+'_3', gen_points3)
np.save(save_dir+'_4', gen_points4)
print('save generate data at: {}'.format(save_dir))
def log_string(self, out_str):
self.LOG_FOUT.write(out_str+'\n')
self.LOG_FOUT.flush()
# print(out_str)
def load(self, checkpoint_dir):
if self.pretrain_model_G is None and self.pretrain_model_D is None:
print('################ new training ################')
return False, 1
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
# ----------------- load G -------------------
if self.pretrain_model_G is not None:
resume_file_G = os.path.join(checkpoint_dir, self.pretrain_model_G)
flag_G = os.path.isfile(resume_file_G)
if not flag_G:
print('G--> Error: no checkpoint directory found!')
exit()
else:
print('resume_file_G------>: {}'.format(resume_file_G))
checkpoint = torch.load(resume_file_G)
self.generator.load_state_dict(checkpoint['G_model'])
self.optimizerG.load_state_dict(checkpoint['G_optimizer'])
G_epoch = checkpoint['G_epoch']
else:
print(" [*] Failed to find the pretrain_model_G")
exit()
# ----------------- load D -------------------
if self.pretrain_model_D is not None:
resume_file_D = os.path.join(checkpoint_dir, self.pretrain_model_D)
flag_D = os.path.isfile(resume_file_D)
if not flag_D:
print('D--> Error: no checkpoint directory found!')
exit()
else:
print('resume_file_D------>: {}'.format(resume_file_D))
checkpoint = torch.load(resume_file_D)
self.discriminator1.load_state_dict(checkpoint['D_model1'])
self.discriminator2.load_state_dict(checkpoint['D_model2'])
self.discriminator3.load_state_dict(checkpoint['D_model3'])
self.discriminator4.load_state_dict(checkpoint['D_model4'])
self.optimizerD1.load_state_dict(checkpoint['D_optimizer1'])
self.optimizerD2.load_state_dict(checkpoint['D_optimizer2'])
self.optimizerD3.load_state_dict(checkpoint['D_optimizer3'])
self.optimizerD4.load_state_dict(checkpoint['D_optimizer4'])
D_epoch = checkpoint['D_epoch']
else:
print(" [*] Failed to find the pretrain_model_D")
exit()
print(" [*] Success to load model --> {} & {}".format(self.pretrain_model_G, self.pretrain_model_D))
return True, G_epoch
def save(self, checkpoint_dir, index_epoch):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
save_name = str(index_epoch)+'_'+self.category
path_save_G = os.path.join(checkpoint_dir, save_name+'_G.pth')
path_save_D = os.path.join(checkpoint_dir, save_name+'_D.pth')
print('Save Path for G: {}'.format(path_save_G))
print('Save Path for D: {}'.format(path_save_D))
torch.save({
'G_model': self.generator.state_dict(),
'G_optimizer': self.optimizerG.state_dict(),
'G_epoch': index_epoch,
}, path_save_G)
torch.save({
'D_model1': self.discriminator1.state_dict(),
'D_model2': self.discriminator2.state_dict(),
'D_model3': self.discriminator3.state_dict(),
'D_model4': self.discriminator4.state_dict(),
'D_optimizer1': self.optimizerD1.state_dict(),
'D_optimizer2': self.optimizerD2.state_dict(),
'D_optimizer3': self.optimizerD3.state_dict(),
'D_optimizer4': self.optimizerD4.state_dict(),
'D_epoch': index_epoch,
}, path_save_D)
def MSE_LOSS(self, label, pred):
# NOTE: relies on TensorFlow being available as `tf`; this helper is unused by the PyTorch training loop above.
return tf.losses.mean_squared_error(label, pred)
################################################################################################
# -------------------------------- class of nework structure -----------------------------------
################################################################################################
# ---------------------------------------G---------------------------------------
import nn_utils
def get_edge_features(x, k, num=-1):
"""
Args:
x: point cloud [B, dims, N]
k: kNN neighbours
Return:
[B, 2dims, N, k]
"""
B, dims, N = x.shape
# batched pair-wise distance
xt = x.permute(0, 2, 1)
xi = -2 * torch.bmm(xt, x)
xs = torch.sum(xt**2, dim=2, keepdim=True)
xst = xs.permute(0, 2, 1)
dist = xi + xs + xst # [B, N, N]
# get k NN id
_, idx_o = torch.sort(dist, dim=2)
idx = idx_o[:, :, 1:k+1] # [B, N, k]
idx = idx.contiguous().view(B, N*k)
# gather
neighbors = []
for b in range(B):
tmp = torch.index_select(x[b], 1, idx[b]) # [d, N*k] <- [d, N], 0, [N*k]
tmp = tmp.view(dims, N, k)
neighbors.append(tmp)
neighbors = torch.stack(neighbors) # [B, d, N, k]
# centralize
central = x.unsqueeze(3) # [B, d, N, 1]
central = central.repeat(1, 1, 1, k) # [B, d, N, k]
ee = torch.cat([central, neighbors-central], dim=1)
assert ee.shape == (B, 2*dims, N, k)
return ee
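# Usage sketch (illustrative only): each point is paired with the offsets to its
# k nearest neighbours, doubling the channel dimension:
#   x = torch.rand(2, 3, 256)
#   e = get_edge_features(x, k=20)   # e.shape == (2, 6, 256, 20)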
def get_edge_features_xyz(x, pc, k, num=-1):
"""
Args:
x: point cloud [B, dims, N]
k: kNN neighbours
Return:
[B, 2dims, N, k]
idx
"""
B, dims, N = x.shape
# ----------------------------------------------------------------
# batched pair-wise distance in feature space (it could alternatively be computed in coordinate space)
# ----------------------------------------------------------------
xt = x.permute(0, 2, 1)
xi = -2 * torch.bmm(xt, x)
xs = torch.sum(xt**2, dim=2, keepdim=True)
xst = xs.permute(0, 2, 1)
dist = xi + xs + xst # [B, N, N]
# get k NN id
_, idx_o = torch.sort(dist, dim=2)
idx = idx_o[:, :, 1:k+1] # [B, N, k]
idx = idx.contiguous().view(B, N*k)
# gather
neighbors = []
xyz =[]
for b in range(B):
tmp = torch.index_select(x[b], 1, idx[b]) # [d, N*k] <- [d, N], 0, [N*k]
tmp = tmp.view(dims, N, k)
neighbors.append(tmp)
tp = torch.index_select(pc[b], 1, idx[b])
tp = tp.view(3, N, k)
xyz.append(tp)
neighbors = torch.stack(neighbors) # [B, d, N, k]
xyz = torch.stack(xyz) # [B, 3, N, k]
# centralize
central = x.unsqueeze(3).repeat(1, 1, 1, k) # [B, d, N, 1] -> [B, d, N, k]
central_xyz = pc.unsqueeze(3).repeat(1, 1, 1, k) # [B, 3, N, 1] -> [B, 3, N, k]
e_fea = torch.cat([central, neighbors-central], dim=1)
e_xyz = torch.cat([central_xyz, xyz-central_xyz], dim=1)
assert e_fea.size() == (B, 2*dims, N, k) and e_xyz.size() == (B, 2*3, N, k)
return e_fea, e_xyz
class conv2dbr(nn.Module):
""" Conv2d-bn-relu
[B, Fin, H, W] -> [B, Fout, H, W]
"""
def __init__(self, Fin, Fout, kernel_size, stride=1):
super(conv2dbr, self).__init__()
self.conv = nn.Conv2d(Fin, Fout, kernel_size, stride)
self.bn = nn.BatchNorm2d(Fout)
self.ac = nn.ReLU(True)
def forward(self, x):
x = self.conv(x) # [B, Fout, H, W]
x = self.bn(x)
x = self.ac(x)
return x
class upsample_edgeConv(nn.Module):
""" Edge Convolution using 1x1 Conv h
[B, Fin, N] -> [B, Fout, N]
"""
def __init__(self, Fin, Fout, k, num):
super(upsample_edgeConv, self).__init__()
self.k = k
self.Fin = Fin
self.Fout = Fout
self.num = num
#self.conv1 = conv2dbr(2*Fin, 2*Fin, 1, 1)
#self.conv2 = conv2dbr(2*Fin, 2*Fout, [1, 2*k+2], [1, 1])
self.conv2 = conv2dbr(2*Fin, 2*Fout, [1, 2*k], [1, 1])
self.inte_conv_hk = nn.Sequential(
#nn.Conv2d(2*Fin, 4*Fin, [1, k//2], [1, 1]), # Fin, Fout, kernel_size, stride
nn.Conv2d(2*Fin, 4*Fin, [1, k//2+1], [1, 1]), # Fin, Fout, kernel_size, stride
nn.BatchNorm2d(4*Fin),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
B, Fin, N = x.shape
x = get_edge_features(x, self.k, self.num); # [B, 2Fin, N, k]
# -------------learn_v2----------------------
BB, CC, NN, KK = x.size()
#x = self.conv1(x)
inte_x = self.inte_conv_hk(x) # Bx2CxNxk/2
inte_x = inte_x.transpose(2, 1) # BxNx2Cxk/2
#inte_x = inte_x.contiguous().view(BB, NN, CC, 2, KK//2+1) # BxNxCx2x(k//2+1)
#inte_x = inte_x.contiguous().view(BB, NN, CC, KK+2) # BxNxCx(k+2)
inte_x = inte_x.contiguous().view(BB, NN, CC, 2, KK//2) # BxNxCx2x(k//2)
inte_x = inte_x.contiguous().view(BB, NN, CC, KK) # BxNxCxk
inte_x = inte_x.permute(0, 2, 1, 3) # BxCxNxk
merge_x = torch.cat((x, inte_x), 3) # BxCxNx2k
x = self.conv2(merge_x) # [B, 2*Fout, N, 1]
x = x.unsqueeze(3) # BxkcxN
x = x.contiguous().view(B, self.Fout, 2, N)
x = x.contiguous().view(B, self.Fout, 2*N)
assert x.shape == (B, self.Fout, 2*N)
return x
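# Usage sketch (illustrative only, parameter values assumed): the module doubles
# the number of points while mapping Fin channels to Fout channels:
#   up = upsample_edgeConv(Fin=32, Fout=32, k=10, num=1)
#   y = up(torch.rand(4, 32, 128))   # y.shape == (4, 32, 256)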
class bilateral_upsample_edgeConv(nn.Module):
""" Edge Convolution using 1x1 Conv h
[B, Fin, N] -> [B, Fout, N]
"""
def __init__(self, Fin, Fout, k, num, softmax=True):
super(bilateral_upsample_edgeConv, self).__init__()
self.k = k
self.Fin = Fin
self.Fout = Fout
self.softmax = softmax
self.num = num
# self.conv = conv2dbr(2*Fin, Fout, [1, 20], [1, 20])
#self.conv1 = conv2dbr(2*Fin, 2*Fin, 1 ,1)
self.conv2 = conv2dbr(2*Fin, 2*Fout, [1, 2*k], [1, 1])
self.conv_xyz = nn.Sequential(
nn.Conv2d(6, 16, 1),
nn.BatchNorm2d(16),
nn.LeakyReLU(inplace=True)
)
self.conv_fea = nn.Sequential(
nn.Conv2d(2*Fin, 16, 1),
nn.BatchNorm2d(16),
nn.LeakyReLU(inplace=True)
)
self.conv_all = nn.Sequential(
nn.Conv2d(16, 64, 1),
nn.BatchNorm2d(64),
nn.LeakyReLU(inplace=True),
nn.Conv2d(64, 2*Fin, 1),
nn.BatchNorm2d(2*Fin),
nn.LeakyReLU(inplace=True)
)
self.inte_conv_hk = nn.Sequential(
#nn.Conv2d(2*Fin, 4*Fin, [1, k//2], [1, 1]), # Fin, Fout, kernel_size, stride
nn.Conv2d(2*Fin, 4*Fin, [1, k//2+1], [1, 1]), # Fin, Fout, kernel_size, stride
nn.BatchNorm2d(4*Fin),
nn.LeakyReLU(inplace = True)
)
def forward(self, x, pc):
B, Fin, N = x.size()
#x = get_edge_features(x, self.k, self.num); # [B, 2Fin, N, k]
x, y = get_edge_features_xyz(x, pc, self.k, self.num); # feature x: [B, 2Fin, N, k] coordinate y: [B, 6, N, k]
w_fea = self.conv_fea(x)
w_xyz = self.conv_xyz(y)
w = w_fea * w_xyz
w = self.conv_all(w)
if self.softmax == True:
w = F.softmax(w, dim=-1) # [B, Fout, N, k] -> [B, Fout, N, k]
# -------------learn_v2----------------------
BB, CC, NN, KK = x.size()
#x = self.conv1(x)
inte_x = self.inte_conv_hk(x) # Bx2CxNxk/2
inte_x = inte_x.transpose(2, 1) # BxNx2Cxk/2
inte_x = inte_x.contiguous().view(BB, NN, CC, 2, KK//2) # BxNxCx2x(k//2)
inte_x = inte_x.contiguous().view(BB, NN, CC, KK) # BxNxCxk
inte_x = inte_x.permute(0, 2, 1, 3) # BxCxNxk
inte_x = inte_x * w
# Here we concatenate the interpolated feature with the original feature.
merge_x = torch.cat((x, inte_x), 3) # BxCxNx2k
# Since conv2 uses a wide kernel size, the process of sorting by distance can be omitted.
x = self.conv2(merge_x) # [B, 2*Fout, N, 1]
x = x.unsqueeze(3) # BxkcxN
x = x.contiguous().view(B, self.Fout, 2, N)
x = x.contiguous().view(B, self.Fout, 2*N)
assert x.shape == (B, self.Fout, 2*N)
return x
class edgeConv(nn.Module):
""" Edge Convolution using 1x1 Conv h
[B, Fin, N] -> [B, Fout, N]
"""
def __init__(self, Fin, Fout, k):
super(edgeConv, self).__init__()
self.k = k
self.Fin = Fin
self.Fout = Fout
self.conv = nn_utils.conv2dbr(2*Fin, Fout, 1)
def forward(self, x):
B, Fin, N = x.shape
x = get_edge_features(x, self.k); # [B, 2Fin, N, k]
x = self.conv(x) # [B, Fout, N, k]
x, _ = torch.max(x, 3) # [B, Fout, N]
assert x.shape == (B, self.Fout, N)
return x
class bilateral_block_l1(nn.Module):
def __init__(self,Fin,Fout,maxpool,stride=1,num_k = 20):
super(bilateral_block_l1,self).__init__()
self.maxpool = nn.MaxPool2d((1,maxpool),(1,1))
self.upsample_cov = nn.Sequential(
upsample_edgeConv(Fin, Fout, num_k//2,1), #(128->256)
nn.BatchNorm1d(Fout),
nn.LeakyReLU(inplace=True)
)
self.fc = nn.Sequential(
nn.Linear(Fin, Fin),
nn.BatchNorm1d(Fin),
nn.LeakyReLU(inplace=True),
#nn.Linear(Fin, 2*Fin),
#nn.BatchNorm1d(2*Fin),
#nn.LeakyReLU(inplace=True),
nn.Linear(Fin, Fout),
nn.BatchNorm1d(Fout),
nn.LeakyReLU(inplace=True),
)
self.g_fc = nn.Sequential(
nn.Linear(Fout,512),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True),
)
def forward(self,x):
batchsize = x.size()[0]
point_num = x.size()[2]
xs = self.maxpool(x)
xs = xs.view(batchsize,-1)
xs = self.fc(xs)
g = self.g_fc(xs)
g = g.view(batchsize, -1, 1)
g = g.repeat(1, 1, 2*point_num)
xs = xs.view(batchsize,-1,1)
xs = xs.repeat(1,1,2*point_num)
x_ec = self.upsample_cov(x)
x_out = torch.cat((xs,x_ec),1)
g_out = torch.cat((g, x_ec), dim=1)
return x_out, g_out
class bilateral_block_l2(nn.Module):
def __init__(self, Fin, Fout, maxpool, stride=1, num_k=20, softmax=True):
super(bilateral_block_l2,self).__init__()
self.maxpool = nn.MaxPool2d((1,maxpool),(1,1))
self.upsample_cov = bilateral_upsample_edgeConv(Fin, Fout, num_k//2, 1, softmax=softmax) #(256->512)
self.bn_uc = nn.BatchNorm1d(Fout)
self.relu_uc = nn.LeakyReLU(inplace=True)
self.fc = nn.Sequential(
nn.Linear(Fin, Fin),
nn.BatchNorm1d(Fin),
nn.LeakyReLU(inplace=True),
#nn.Linear(Fin, 2*Fin),
#nn.BatchNorm1d(2*Fin),
#nn.LeakyReLU(inplace=True),
nn.Linear(Fin, Fout),
nn.BatchNorm1d(Fout),
nn.LeakyReLU(inplace=True),
)
self.g_fc = nn.Sequential(
nn.Linear(Fout,512),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True),
)
def forward(self, x, pc):
batchsize, _, point_num = x.size()
xs = self.maxpool(x)
xs = xs.view(batchsize,-1)
xs = self.fc(xs)
g = self.g_fc(xs)
g = g.view(batchsize, -1, 1)
g = g.repeat(1, 1, 2*point_num)
xs = xs.view(batchsize,-1,1)
xs = xs.repeat(1, 1, 2*point_num)
x_ec = self.relu_uc(self.bn_uc(self.upsample_cov(x, pc)))
x_out = torch.cat((xs, x_ec), 1)
g_out = torch.cat((g, x_ec), dim=1)
return x_out, g_out
class bilateral_block_l3(nn.Module):
def __init__(self, Fin, Fout, maxpool, stride=1, num_k=20, softmax=True):
super(bilateral_block_l3,self).__init__()
self.maxpool = nn.MaxPool2d((1,maxpool),(1,1))
self.upsample_cov = bilateral_upsample_edgeConv(Fin, Fout, num_k//2, 1, softmax=softmax) #(256->512)
self.bn_uc = nn.BatchNorm1d(Fout)
self.relu_uc = nn.LeakyReLU(inplace=True)
self.fc = nn.Sequential(
nn.Linear(Fin, Fin),
nn.BatchNorm1d(Fin),
nn.LeakyReLU(inplace=True),
#nn.Linear(Fin,2*Fin),
#nn.BatchNorm1d(2*Fin),
#nn.LeakyReLU(inplace=True),
nn.Linear(Fin, Fout),
nn.BatchNorm1d(Fout),
nn.LeakyReLU(inplace=True),
)
self.g_fc = nn.Sequential(
nn.Linear(Fout, 512),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True),
)
def forward(self, x, pc):
batchsize = x.size()[0]
point_num = x.size()[2]
xs = self.maxpool(x)
xs = xs.view(batchsize,-1)
xs = self.fc(xs)
g = self.g_fc(xs)
g = g.view(batchsize, -1, 1)
g = g.repeat(1, 1, 2*point_num)
xs = xs.view(batchsize,-1,1)
xs = xs.repeat(1,1,2*point_num)
#x_ec = self.upsample_cov(x)
x_ec = self.relu_uc(self.bn_uc(self.upsample_cov(x, pc)))
x_out = torch.cat((xs,x_ec),1)
g_out = torch.cat((g, x_ec), dim=1)
return x_out, g_out
class bilateral_block_l4(nn.Module):
def __init__(self, Fin, Fout, maxpool, stride=1, num_k=20, softmax=True):
super(bilateral_block_l4, self).__init__()
self.maxpool = nn.MaxPool2d((1,maxpool),(1,1))
self.upsample_cov = bilateral_upsample_edgeConv(Fin, Fout, num_k//2, 1, softmax=softmax) #(256->512)
self.bn_uc = nn.BatchNorm1d(Fout)
self.relu_uc = nn.LeakyReLU(inplace=True)
self.fc = nn.Sequential(
nn.Linear(Fin, Fin),
nn.BatchNorm1d(Fin),
nn.LeakyReLU(inplace=True),
#nn.Linear(Fin,2*Fin),
#nn.BatchNorm1d(2*Fin),
#nn.LeakyReLU(inplace=True),
nn.Linear(Fin, Fout),
nn.BatchNorm1d(Fout),
nn.LeakyReLU(inplace=True),
)
def forward(self, x, pc):
batchsize = x.size()[0]
point_num = x.size()[2]
xs = self.maxpool(x)
xs = xs.view(batchsize,-1)
xs = self.fc(xs)
xs = xs.view(batchsize,-1,1)
xs = xs.repeat(1,1,2*point_num)
#x_ec = self.upsample_cov(x)
x_ec = self.relu_uc(self.bn_uc(self.upsample_cov(x, pc)))
x_out = torch.cat((xs,x_ec),1)
return x_out
class PointGenerator(nn.Module):
def __init__(self, num_point=2048, num_k=20, softmax=True):
super(PointGenerator, self).__init__()
self.num_point = num_point
self.num_k = num_k
self.fc1 = nn.Sequential(
nn.Linear(128, 4096),
nn.BatchNorm1d(4096), # reshaped to Bx32x128 in forward()
nn.LeakyReLU(inplace=True)
)
self.bilateral1 = bilateral_block_l1(32, 32, 128, num_k=num_k)
self.bilateral2 = bilateral_block_l2(64, 64, 256, num_k=num_k, softmax=softmax)
self.bilateral3 = bilateral_block_l3(128, 128, 512, num_k=num_k, softmax=softmax)
self.bilateral4 = bilateral_block_l4(256, 256, 1024, num_k=num_k, softmax=softmax)
self.mlp1 = nn.Sequential(
nn.Conv1d(512+32, 256, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256, 64, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64, 3, 1),
nn.Tanh()
)
self.mlp2 = nn.Sequential(
nn.Conv1d(512+64, 256, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256, 64, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64, 3, 1),
nn.Tanh()
)
self.mlp3 = nn.Sequential(
nn.Conv1d(512+128, 256, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256, 64, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64, 3, 1),
nn.Tanh()
)
self.mlp4 = nn.Sequential(
nn.Conv1d(512, 256, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256, 64, 1),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64, 3, 1),
nn.Tanh()
)
def forward(self, x):
batchsize = x.size()[0]
x = self.fc1(x)
x = x.view(batchsize, 32, 128) # Bx32x128
x1, g_x1 = self.bilateral1(x) # x1: Bx64x256
x1s = self.mlp1(g_x1) # Bx3x256
#print('x1: {} x1s: {}'.format(x1.size(), x1s.size()))
x2, g_x2 = self.bilateral2(x1, x1s) # x2: Bx128x512
x2s = self.mlp2(g_x2) # Bx3x512
#print('x2: {} x2s: {}'.format(x2.size(), x2s.size()))
x3, g_x3 = self.bilateral3(x2, x2s) # x3: Bx256x1024
x3s = self.mlp3(g_x3) # Bx3x1024
#print('x3: {} x3s: {}'.format(x3.size(), x3s.size()))
x4 = self.bilateral4(x3, x3s) # x4: Bx512x2048
x4s = self.mlp4(x4) # Bx3x2048
#print('x4: {} x4s: {}'.format(x4.size(), x4s.size()))
#exit()
return x1s, x2s, x3s, x4s
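# Usage sketch (illustrative only): the generator maps a 128-d latent vector to
# four progressively denser clouds:
#   g = PointGenerator(num_point=2048, num_k=20)
#   p1, p2, p3, p4 = g(torch.randn(8, 128))   # 8x3x256, 8x3x512, 8x3x1024, 8x3x2048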
# ---------------------------------------D---------------------------------------
class PointDiscriminator_1(nn.Module):
def __init__(self, num_point=256):
super(PointDiscriminator_1, self).__init__()
self.num_point = num_point
self.fc1 = nn.Sequential(
nn.Conv1d(3,64,1),
nn.BatchNorm1d(64),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64,128,1),
nn.BatchNorm1d(128),
nn.LeakyReLU(inplace=True),
nn.Conv1d(128,256,1),
nn.BatchNorm1d(256),
nn.LeakyReLU(inplace=True),
#nn.Conv1d(256,1024,1),
#nn.BatchNorm1d(1024),
#nn.LeakyReLU()
)
self.maxpool = nn.MaxPool1d(num_point,1)
self.mlp = nn.Sequential(
#nn.Linear(1024,512),
#nn.LeakyReLU(),
nn.Linear(256,128),
nn.LeakyReLU(inplace=True),
nn.Linear(128,64),
nn.LeakyReLU(inplace=True),
nn.Linear(64,1)
)
def forward(self, x):
batchsize = x.size()[0]
#print('d1: x', x.size())
#x = x.view(batchsize,3,self.num_point)
x1 = self.fc1(x)
x2 = self.maxpool(x1)
x2 = x2.view(batchsize,256)
x3 = self.mlp(x2)
return x3
class PointDiscriminator_2(nn.Module):
def __init__(self, num_point=512):
super(PointDiscriminator_2, self).__init__()
self.num_point = num_point
self.fc1 = nn.Sequential(
nn.Conv1d(3,64,1),
nn.BatchNorm1d(64),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64,128,1),
nn.BatchNorm1d(128),
nn.LeakyReLU(inplace=True),
nn.Conv1d(128,256,1),
nn.BatchNorm1d(256),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256,512,1),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True)
)
self.maxpool = nn.MaxPool1d(num_point,1)
self.mlp = nn.Sequential(
nn.Linear(512,256),
nn.LeakyReLU(inplace=True),
nn.Linear(256,64),
nn.LeakyReLU(inplace=True),
nn.Linear(64,1)
)
def forward(self, x):
batchsize = x.size()[0]
x1 = self.fc1(x)
x2 = self.maxpool(x1)
x2 = x2.view(batchsize,512)
x3 = self.mlp(x2)
return x3
class PointDiscriminator_3(nn.Module):
def __init__(self, num_point=1024):
super(PointDiscriminator_3, self).__init__()
self.num_point = num_point
self.fc1 = nn.Sequential(
nn.Conv1d(3,64,1),
nn.BatchNorm1d(64),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64,128,1),
nn.BatchNorm1d(128),
nn.LeakyReLU(inplace=True),
nn.Conv1d(128,256,1),
nn.BatchNorm1d(256),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256,512,1),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True)
)
self.maxpool = nn.MaxPool1d(num_point,1)
self.mlp = nn.Sequential(
#nn.Linear(1024,512),
#nn.LeakyReLU(),
nn.Linear(512,256),
nn.LeakyReLU(inplace=True),
nn.Linear(256,64),
nn.LeakyReLU(inplace=True),
nn.Linear(64,1)
)
def forward(self, x):
batchsize = x.size()[0]
#print('d2: x', x.size())
#x = x.view(batchsize,3,self.num_point)
x1 = self.fc1(x)
x2 = self.maxpool(x1)
x2 = x2.view(batchsize,512)
x3 = self.mlp(x2)
return x3
class PointDiscriminator_4(nn.Module):
def __init__(self, num_point=2048):
super(PointDiscriminator_4, self).__init__()
self.num_point = num_point
self.fc1 = nn.Sequential(
nn.Conv1d(3,64,1),
nn.BatchNorm1d(64),
nn.LeakyReLU(inplace=True),
nn.Conv1d(64,128,1),
nn.BatchNorm1d(128),
nn.LeakyReLU(inplace=True),
nn.Conv1d(128,256,1),
nn.BatchNorm1d(256),
nn.LeakyReLU(inplace=True),
nn.Conv1d(256,1024,1),
nn.BatchNorm1d(1024),
nn.LeakyReLU(inplace=True)
)
self.maxpool = nn.MaxPool1d(num_point,1)
self.mlp = nn.Sequential(
nn.Linear(1024,512),
nn.LeakyReLU(inplace=True),
nn.Linear(512,256),
nn.LeakyReLU(inplace=True),
nn.Linear(256,64),
nn.LeakyReLU(inplace=True),
nn.Linear(64,1)
)
def forward(self, x):
batchsize = x.size()[0]
#print('d3: x', x.size())
#x = x.view(batchsize,3,self.num_point)
x1 = self.fc1(x)
x2 = self.maxpool(x1)
x2 = x2.view(batchsize,1024)
x3 = self.mlp(x2)
return x3
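# Usage sketch (illustrative only): each discriminator scores clouds of its own
# resolution, e.g. the full-resolution branch:
#   d4 = PointDiscriminator_4(num_point=2048)
#   scores = d4(torch.rand(8, 3, 2048))   # scores.shape == (8, 1)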
|
the-stack_106_19515
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VariableRegistryDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variables': 'list[VariableEntity]',
'process_group_id': 'str'
}
attribute_map = {
'variables': 'variables',
'process_group_id': 'processGroupId'
}
def __init__(self, variables=None, process_group_id=None):
"""
VariableRegistryDTO - a model defined in Swagger
"""
self._variables = None
self._process_group_id = None
if variables is not None:
self.variables = variables
if process_group_id is not None:
self.process_group_id = process_group_id
@property
def variables(self):
"""
Gets the variables of this VariableRegistryDTO.
The variables that are available in this Variable Registry
:return: The variables of this VariableRegistryDTO.
:rtype: list[VariableEntity]
"""
return self._variables
@variables.setter
def variables(self, variables):
"""
Sets the variables of this VariableRegistryDTO.
The variables that are available in this Variable Registry
:param variables: The variables of this VariableRegistryDTO.
:type: list[VariableEntity]
"""
self._variables = variables
@property
def process_group_id(self):
"""
Gets the process_group_id of this VariableRegistryDTO.
The UUID of the Process Group that this Variable Registry belongs to
:return: The process_group_id of this VariableRegistryDTO.
:rtype: str
"""
return self._process_group_id
@process_group_id.setter
def process_group_id(self, process_group_id):
"""
Sets the process_group_id of this VariableRegistryDTO.
The UUID of the Process Group that this Variable Registry belongs to
:param process_group_id: The process_group_id of this VariableRegistryDTO.
:type: str
"""
self._process_group_id = process_group_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VariableRegistryDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
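# Usage sketch (illustrative only; real code would pass VariableEntity objects):
#   registry = VariableRegistryDTO(variables=[], process_group_id='1234-abcd')
#   registry.to_dict()   # -> {'variables': [], 'process_group_id': '1234-abcd'}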
|
the-stack_106_19516
|
from board import SCL, SDA
import busio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
class display:
def __init__( self, config:object ):
self.i2c = busio.I2C(SCL,SDA)
self.display = adafruit_ssd1306.SSD1306_I2C(config['DISPLAY']['WIDTH'], config['DISPLAY']['HEIGHT'], self.i2c)
self.padding = config['DISPLAY']['PADDING']
self.top = self.padding
self.bottom = self.display.height - self.padding
self.x = 0
self.txt_height = 8
self.font = ImageFont.load_default()
def clear_display(self):
self.display.fill(0)
self.display.show()
def print_lines( self,lines:list ):
image = Image.new("1", (self.display.width, self.display.height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, self.display.width, self.display.height), outline=0, fill=0)
y = 0
for line in lines:
draw.text((self.x, self.top + y), line, font=self.font, fill=255)
y+=self.txt_height
self.display.image(image)
self.display.show()
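# Usage sketch (illustrative only; config keys assumed from __init__ above):
#   cfg = {'DISPLAY': {'WIDTH': 128, 'HEIGHT': 32, 'PADDING': 2}}
#   oled = display(cfg)
#   oled.clear_display()
#   oled.print_lines(['line one', 'line two'])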
|
the-stack_106_19517
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from absl.testing import absltest, parameterized
import numpy as np
import jax
from jax.config import config
import jax.dlpack
import jax.numpy as jnp
from jax import test_util as jtu
config.parse_flags_with_absl()
try:
import torch
import torch.utils.dlpack
except ImportError:
torch = None
try:
import cupy
except ImportError:
cupy = None
dlpack_dtypes = [jnp.int8, jnp.int16, jnp.int32, jnp.int64,
jnp.uint8, jnp.uint16, jnp.uint32, jnp.uint64,
jnp.float16, jnp.float32, jnp.float64]
all_dtypes = dlpack_dtypes + [jnp.bool_, jnp.bfloat16]
torch_dtypes = [jnp.int8, jnp.int16, jnp.int32, jnp.int64,
jnp.uint8, jnp.float16, jnp.float32, jnp.float64]
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (2, 3, 4)]
empty_array_shapes = []
empty_array_shapes += [(0,), (0, 4), (3, 0),]
nonempty_nonscalar_array_shapes += [(3, 1), (1, 4), (2, 1, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
all_shapes = nonempty_array_shapes + empty_array_shapes
class DLPackTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "tpu":
self.skipTest("DLPack not supported on TPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
def testJaxRoundTrip(self, shape, dtype):
rng = jtu.rand_default()
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(x, y, check_dtypes=True)
self.assertRaisesRegex(RuntimeError,
"DLPack tensor may be consumed at most once",
lambda: jax.dlpack.from_dlpack(dlpack))
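# Round-trip sketch outside the test harness (illustrative only, same API as above):
#   x = jnp.ones((3, 4), jnp.float32)
#   y = jax.dlpack.from_dlpack(jax.dlpack.to_dlpack(x))   # y equals x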
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
def testTorchToJax(self, shape, dtype):
rng = jtu.rand_default()
np = rng(shape, dtype)
x = torch.from_numpy(np)
x = x.cuda() if jtu.device_under_test() == "gpu" else x
dlpack = torch.utils.dlpack.to_dlpack(x)
y = jax.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in torch_dtypes))
@unittest.skipIf(not torch, "Test requires PyTorch")
# TODO(phawkins): the dlpack destructor issues errors in jaxlib 0.1.38.
def testJaxToTorch(self, shape, dtype):
rng = jtu.rand_default()
np = rng(shape, dtype)
x = jnp.array(np)
dlpack = jax.dlpack.to_dlpack(x)
y = torch.utils.dlpack.from_dlpack(dlpack)
self.assertAllClose(np, y.numpy(), check_dtypes=True)
class CudaArrayInterfaceTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() != "gpu":
self.skipTest("__cuda_array_interface__ is only supported on GPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes
for dtype in dlpack_dtypes))
@unittest.skipIf(not cupy, "Test requires CuPy")
def testJaxToCuPy(self, shape, dtype):
rng = jtu.rand_default()
x = rng(shape, dtype)
y = jnp.array(x)
z = cupy.asarray(y)
self.assertEqual(y.__cuda_array_interface__["data"][0],
z.__cuda_array_interface__["data"][0])
self.assertAllClose(x, cupy.asnumpy(z), check_dtypes=True)
if __name__ == "__main__":
absltest.main()
|
the-stack_106_19519
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, List, Optional, Tuple, Type, TypeVar, Union
from . import enums, flags, utils
from .asset import Asset
from .colour import Colour
from .invite import Invite
from .mixins import Hashable
from .object import Object
from .permissions import PermissionOverwrite, Permissions
__all__ = (
'AuditLogDiff',
'AuditLogChanges',
'AuditLogEntry',
)
if TYPE_CHECKING:
import datetime
from . import abc
from .emoji import Emoji
from .guild import Guild
from .member import Member
from .role import Role
from .scheduled_event import ScheduledEvent
from .types.audit_log import (
AuditLogChange as AuditLogChangePayload,
AuditLogEntry as AuditLogEntryPayload,
)
from .types.channel import (
PartialChannel as PartialChannelPayload,
PermissionOverwrite as PermissionOverwritePayload,
)
from .types.invite import Invite as InvitePayload
from .types.role import Role as RolePayload
from .types.snowflake import Snowflake
from .user import User
from .stage_instance import StageInstance
from .sticker import GuildSticker
from .threads import Thread
TargetType = Union[
Guild, abc.GuildChannel, Member, User, Role, Invite, Emoji, StageInstance, GuildSticker, Thread, Object, None
]
def _transform_timestamp(entry: AuditLogEntry, data: Optional[str]) -> Optional[datetime.datetime]:
return utils.parse_time(data)
def _transform_color(entry: AuditLogEntry, data: int) -> Colour:
return Colour(data)
def _transform_snowflake(entry: AuditLogEntry, data: Snowflake) -> int:
return int(data)
def _transform_channel(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Union[abc.GuildChannel, Object]]:
if data is None:
return None
return entry.guild.get_channel(int(data)) or Object(id=data)
def _transform_member_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Union[Member, User, None]:
if data is None:
return None
return entry._get_member(int(data))
def _transform_guild_id(entry: AuditLogEntry, data: Optional[Snowflake]) -> Optional[Guild]:
if data is None:
return None
return entry._state._get_guild(int(data))
def _transform_overwrites(
entry: AuditLogEntry, data: List[PermissionOverwritePayload]
) -> List[Tuple[Object, PermissionOverwrite]]:
overwrites = []
for elem in data:
allow = Permissions(int(elem['allow']))
deny = Permissions(int(elem['deny']))
ow = PermissionOverwrite.from_pair(allow, deny)
ow_type = elem['type']
ow_id = int(elem['id'])
target = None
if ow_type == '0':
target = entry.guild.get_role(ow_id)
elif ow_type == '1':
target = entry._get_member(ow_id)
if target is None:
target = Object(id=ow_id)
overwrites.append((target, ow))
return overwrites
def _transform_icon(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
if entry.action is enums.AuditLogAction.guild_update:
return Asset._from_guild_icon(entry._state, entry.guild.id, data)
else:
return Asset._from_icon(entry._state, entry._target_id, data, path='role') # type: ignore - target_id won't be None in this case
def _transform_avatar(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
return Asset._from_avatar(entry._state, entry._target_id, data) # type: ignore - target_id won't be None in this case
def _guild_hash_transformer(path: str) -> Callable[[AuditLogEntry, Optional[str]], Optional[Asset]]:
def _transform(entry: AuditLogEntry, data: Optional[str]) -> Optional[Asset]:
if data is None:
return None
return Asset._from_guild_image(entry._state, entry.guild.id, data, path=path)
return _transform
E = TypeVar('E', bound=enums.Enum)
def _enum_transformer(enum: Type[E]) -> Callable[[AuditLogEntry, int], E]:
def _transform(entry: AuditLogEntry, data: int) -> E:
return enums.try_enum(enum, data)
return _transform
F = TypeVar('F', bound=flags.BaseFlags)
def _flag_transformer(cls: Type[F]) -> Callable[[AuditLogEntry, Union[int, str]], F]:
def _transform(entry: AuditLogEntry, data: Union[int, str]) -> F:
return cls._from_value(int(data))
return _transform
def _transform_type(entry: AuditLogEntry, data: int) -> Union[enums.ChannelType, enums.StickerType]:
if entry.action.name.startswith('sticker_'):
return enums.try_enum(enums.StickerType, data)
else:
return enums.try_enum(enums.ChannelType, data)
class AuditLogDiff:
def __len__(self) -> int:
return len(self.__dict__)
def __iter__(self) -> Generator[Tuple[str, Any], None, None]:
yield from self.__dict__.items()
def __repr__(self) -> str:
values = ' '.join('%s=%r' % item for item in self.__dict__.items())
return f'<AuditLogDiff {values}>'
if TYPE_CHECKING:
def __getattr__(self, item: str) -> Any:
...
def __setattr__(self, key: str, value: Any) -> Any:
...
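# Usage sketch (illustrative only): a diff iterates over the changed attributes,
# so it can be consumed like a mapping:
#   for attr, value in entry.before:
#       print(attr, value)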
Transformer = Callable[["AuditLogEntry", Any], Any]
class AuditLogChanges:
# fmt: off
TRANSFORMERS: ClassVar[Dict[str, Tuple[Optional[str], Optional[Transformer]]]] = {
'verification_level': (None, _enum_transformer(enums.VerificationLevel)),
'explicit_content_filter': (None, _enum_transformer(enums.ContentFilter)),
'allow': (None, _flag_transformer(Permissions)),
'deny': (None, _flag_transformer(Permissions)),
'permissions': (None, _flag_transformer(Permissions)),
'id': (None, _transform_snowflake),
'color': ('colour', _transform_color),
'owner_id': ('owner', _transform_member_id),
'inviter_id': ('inviter', _transform_member_id),
'channel_id': ('channel', _transform_channel),
'afk_channel_id': ('afk_channel', _transform_channel),
'system_channel_id': ('system_channel', _transform_channel),
'system_channel_flags': (None, _flag_transformer(flags.SystemChannelFlags)),
'widget_channel_id': ('widget_channel', _transform_channel),
'rules_channel_id': ('rules_channel', _transform_channel),
'public_updates_channel_id': ('public_updates_channel', _transform_channel),
'permission_overwrites': ('overwrites', _transform_overwrites),
'splash_hash': ('splash', _guild_hash_transformer('splashes')),
'banner_hash': ('banner', _guild_hash_transformer('banners')),
'discovery_splash_hash': ('discovery_splash', _guild_hash_transformer('discovery-splashes')),
'icon_hash': ('icon', _transform_icon),
'avatar_hash': ('avatar', _transform_avatar),
'rate_limit_per_user': ('slowmode_delay', None),
'guild_id': ('guild', _transform_guild_id),
'tags': ('emoji', None),
'default_message_notifications': ('default_notifications', _enum_transformer(enums.NotificationLevel)),
'video_quality_mode': (None, _enum_transformer(enums.VideoQualityMode)),
'privacy_level': (None, _enum_transformer(enums.PrivacyLevel)),
'format_type': (None, _enum_transformer(enums.StickerFormatType)),
'type': (None, _transform_type),
'communication_disabled_until': ('timed_out_until', _transform_timestamp),
'expire_behavior': (None, _enum_transformer(enums.ExpireBehaviour)),
'mfa_level': (None, _enum_transformer(enums.MFALevel)),
'status': (None, _enum_transformer(enums.EventStatus)),
'entity_type': (None, _enum_transformer(enums.EntityType)),
}
# fmt: on
def __init__(self, entry: AuditLogEntry, data: List[AuditLogChangePayload]):
self.before = AuditLogDiff()
self.after = AuditLogDiff()
for elem in data:
attr = elem['key']
# special cases for role add/remove
if attr == '$add':
self._handle_role(self.before, self.after, entry, elem['new_value']) # type: ignore - new_value is a list of roles in this case
continue
elif attr == '$remove':
self._handle_role(self.after, self.before, entry, elem['new_value']) # type: ignore - new_value is a list of roles in this case
continue
try:
key, transformer = self.TRANSFORMERS[attr]
except (ValueError, KeyError):
transformer = None
else:
if key:
attr = key
transformer: Optional[Transformer]
try:
before = elem['old_value']
except KeyError:
before = None
else:
if transformer:
before = transformer(entry, before)
setattr(self.before, attr, before)
try:
after = elem['new_value']
except KeyError:
after = None
else:
if transformer:
after = transformer(entry, after)
setattr(self.after, attr, after)
# add an alias
if hasattr(self.after, 'colour'):
self.after.color = self.after.colour
self.before.color = self.before.colour
if hasattr(self.after, 'expire_behavior'):
self.after.expire_behaviour = self.after.expire_behavior
self.before.expire_behaviour = self.before.expire_behavior
def __repr__(self) -> str:
return f'<AuditLogChanges before={self.before!r} after={self.after!r}>'
def _handle_role(self, first: AuditLogDiff, second: AuditLogDiff, entry: AuditLogEntry, elem: List[RolePayload]) -> None:
if not hasattr(first, 'roles'):
setattr(first, 'roles', [])
data = []
g: Guild = entry.guild
for e in elem:
role_id = int(e['id'])
role = g.get_role(role_id)
if role is None:
role = Object(id=role_id)
role.name = e['name'] # type: ignore - Object doesn't usually have name
data.append(role)
setattr(second, 'roles', data)
class _AuditLogProxy:
def __init__(self, **kwargs: Any) -> None:
for k, v in kwargs.items():
setattr(self, k, v)
class _AuditLogProxyMemberPrune(_AuditLogProxy):
delete_member_days: int
members_removed: int
class _AuditLogProxyMemberMoveOrMessageDelete(_AuditLogProxy):
channel: abc.GuildChannel
count: int
class _AuditLogProxyMemberDisconnect(_AuditLogProxy):
count: int
class _AuditLogProxyPinAction(_AuditLogProxy):
channel: abc.GuildChannel
message_id: int
class _AuditLogProxyStageInstanceAction(_AuditLogProxy):
channel: abc.GuildChannel
class AuditLogEntry(Hashable):
r"""Represents an Audit Log entry.
You retrieve these via :meth:`Guild.audit_logs`.
.. container:: operations
.. describe:: x == y
Checks if two entries are equal.
.. describe:: x != y
Checks if two entries are not equal.
.. describe:: hash(x)
Returns the entry's hash.
.. versionchanged:: 1.7
Audit log entries are now comparable and hashable.
Attributes
-----------
action: :class:`AuditLogAction`
The action that was done.
user: :class:`abc.User`
The user who initiated this action. Usually a :class:`Member`\, unless gone
then it's a :class:`User`.
id: :class:`int`
The entry ID.
target: Any
The target that got changed. The exact type of this depends on
the action being done.
reason: Optional[:class:`str`]
The reason this action was done.
extra: Any
Extra information that this entry has that might be useful.
For most actions, this is ``None``. However in some cases it
contains extra information. See :class:`AuditLogAction` for
which actions have this field filled out.
"""
def __init__(self, *, users: Dict[int, User], data: AuditLogEntryPayload, guild: Guild):
self._state = guild._state
self.guild = guild
self._users = users
self._from_data(data)
def _from_data(self, data: AuditLogEntryPayload) -> None:
self.action = enums.try_enum(enums.AuditLogAction, data['action_type'])
self.id = int(data['id'])
# this key is technically not usually present
self.reason = data.get('reason')
extra = data.get('options')
# fmt: off
self.extra: Union[
_AuditLogProxyMemberPrune,
_AuditLogProxyMemberMoveOrMessageDelete,
_AuditLogProxyMemberDisconnect,
_AuditLogProxyPinAction,
_AuditLogProxyStageInstanceAction,
Member, User, None,
Role, Object
] = None
# fmt: on
if isinstance(self.action, enums.AuditLogAction) and extra:
if self.action is enums.AuditLogAction.member_prune:
# member prune has two keys with useful information
self.extra = _AuditLogProxyMemberPrune(
delete_member_days=int(extra['delete_member_days']),
members_removed=int(extra['members_removed']),
)
elif self.action is enums.AuditLogAction.member_move or self.action is enums.AuditLogAction.message_delete:
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyMemberMoveOrMessageDelete(
count=int(extra['count']),
channel=self.guild.get_channel(channel_id) or Object(id=channel_id),
)
elif self.action is enums.AuditLogAction.member_disconnect:
# The member disconnect action has a dict with some information
self.extra = _AuditLogProxyMemberDisconnect(count=int(extra['count']))
elif self.action.name.endswith('pin'):
# the pin actions have a dict with some information
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyPinAction(
channel=self.guild.get_channel(channel_id) or Object(id=channel_id),
message_id=int(extra['message_id']),
)
elif self.action.name.startswith('overwrite_'):
# the overwrite_ actions have a dict with some information
instance_id = int(extra['id'])
the_type = extra.get('type')
if the_type == '1':
self.extra = self._get_member(instance_id)
elif the_type == '0':
role = self.guild.get_role(instance_id)
if role is None:
role = Object(id=instance_id)
role.name = extra.get('role_name') # type: ignore - Object doesn't usually have name
self.extra = role
elif self.action.name.startswith('stage_instance'):
channel_id = int(extra['channel_id'])
self.extra = _AuditLogProxyStageInstanceAction(
channel=self.guild.get_channel(channel_id) or Object(id=channel_id)
)
# this key is not present when the above is present, typically.
# It's a list of { new_value: a, old_value: b, key: c }
# where new_value and old_value are not guaranteed to be there depending
# on the action type, so let's just fetch it for now and only turn it
# into meaningful data when requested
self._changes = data.get('changes', [])
user_id = utils._get_as_snowflake(data, 'user_id')
self.user = user_id and self._get_member(user_id)
self._target_id = utils._get_as_snowflake(data, 'target_id')
def _get_member(self, user_id: int) -> Union[Member, User, None]:
return self.guild.get_member(user_id) or self._users.get(user_id)
def __repr__(self) -> str:
return f'<AuditLogEntry id={self.id} action={self.action} user={self.user!r}>'
@utils.cached_property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the entry's creation time in UTC."""
return utils.snowflake_time(self.id)
@utils.cached_property
def target(self) -> TargetType:
if self._target_id is None or self.action.target_type is None:
return None
try:
converter = getattr(self, '_convert_target_' + self.action.target_type)
except AttributeError:
return Object(id=self._target_id)
else:
return converter(self._target_id)
@utils.cached_property
def category(self) -> Optional[enums.AuditLogActionCategory]:
"""Optional[:class:`AuditLogActionCategory`]: The category of the action, if applicable."""
return self.action.category
@utils.cached_property
def changes(self) -> AuditLogChanges:
""":class:`AuditLogChanges`: The list of changes this entry has."""
obj = AuditLogChanges(self, self._changes)
del self._changes
return obj
@utils.cached_property
def before(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's prior state."""
return self.changes.before
@utils.cached_property
def after(self) -> AuditLogDiff:
""":class:`AuditLogDiff`: The target's subsequent state."""
return self.changes.after
def _convert_target_guild(self, target_id: int) -> Guild:
return self.guild
def _convert_target_channel(self, target_id: int) -> Union[abc.GuildChannel, Object]:
return self.guild.get_channel(target_id) or Object(id=target_id)
def _convert_target_user(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_role(self, target_id: int) -> Union[Role, Object]:
return self.guild.get_role(target_id) or Object(id=target_id)
def _convert_target_invite(self, target_id: int) -> Invite:
# invites have target_id set to null
# so figure out which change has the full invite data
changeset = self.before if self.action is enums.AuditLogAction.invite_delete else self.after
fake_payload: InvitePayload = {
'max_age': changeset.max_age,
'max_uses': changeset.max_uses,
'code': changeset.code,
'temporary': changeset.temporary,
'uses': changeset.uses,
'channel': None, # type: ignore - the channel is passed to the Invite constructor directly
}
obj = Invite(state=self._state, data=fake_payload, guild=self.guild, channel=changeset.channel)
try:
obj.inviter = changeset.inviter
except AttributeError:
pass
return obj
def _convert_target_emoji(self, target_id: int) -> Union[Emoji, Object]:
return self._state.get_emoji(target_id) or Object(id=target_id)
def _convert_target_message(self, target_id: int) -> Union[Member, User, None]:
return self._get_member(target_id)
def _convert_target_stage_instance(self, target_id: int) -> Union[StageInstance, Object]:
return self.guild.get_stage_instance(target_id) or Object(id=target_id)
def _convert_target_sticker(self, target_id: int) -> Union[GuildSticker, Object]:
return self._state.get_sticker(target_id) or Object(id=target_id)
def _convert_target_thread(self, target_id: int) -> Union[Thread, Object]:
return self.guild.get_thread(target_id) or Object(id=target_id)
def _convert_target_guild_scheduled_event(self, target_id: int) -> Union[ScheduledEvent, Object]:
return self.guild.get_scheduled_event(target_id) or Object(id=target_id)
|
the-stack_106_19520
|
'''
Created by auto_sdk on 2020.05.19
'''
from aliexpress.top.api.base import RestApi
class AliexpressAffiliateProductdetailGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.app_signature = None
self.fields = None
self.product_ids = None
self.target_currency = None
self.target_language = None
self.tracking_id = None
def getapiname(self):
return 'aliexpress.affiliate.productdetail.get'
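# --- Editor's note: a hedged usage sketch, not part of the generated SDK. How the
# request is executed depends on aliexpress.top.api.base.RestApi; Taobao-style SDKs
# usually expose a getResponse() method (treated here as an assumption):
#
#     req = AliexpressAffiliateProductdetailGetRequest()
#     req.app_signature = 'your-app-signature'
#     req.product_ids = '4000123456789'
#     req.target_currency = 'USD'
#     # resp = req.getResponse()  # hypothetical call; verify against the base class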
|
the-stack_106_19521
|
# Write a program (using functions!) that asks the user for a long string containing multiple words. Print back to the user the same string, except with the words in backwards order. For example, say I type the string:
# My name is Michele
# Then I would see the string:
# Michele is name My
# shown back to me.
sentence = input("Sentence: ")
words = sentence.split(" ")
for x in range(len(words)):
    print(words[-x-1] + (" " if x != len(words) - 1 else ""), end="")
print()
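# --- Editor's note: the prompt above asks for a function-based solution; a concise
# alternative (a sketch, not part of the original exercise) is:
def reverse_words(text):
    """Return the words of `text` in reverse order, separated by single spaces."""
    return " ".join(reversed(text.split()))
# Example: reverse_words("My name is Michele") -> "Michele is name My"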
|
the-stack_106_19522
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDatrie(PythonPackage):
"""Super-fast, efficiently stored Trie for Python (2.x and 3.x). Uses libdatrie."""
pypi = "datrie/datrie-0.8.2.tar.gz"
maintainers = ['marcusboden']
    version('0.8.2', sha256='525b08f638d5cf6115df6ccd818e5a01298cd230b2dac91c8ff2e6499d18765d')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('[email protected]:', type=('build'))
depends_on('[email protected]:', type='build')
depends_on('py-pytest-runner', type='build')
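# --- Editor's note, not part of the recipe: with this package.py in a Spack
# repository, the library is typically installed with
#
#     spack install py-datrie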
|
the-stack_106_19524
|
#Compounding style
Simple = 0 # 1+rt
Compounded = 1 # (1+r)^t
Continuous = 2 # e^{rt}
SimpleThenCompounded = 3 # Simple up to the first period then Compounded
def compounding_from_name(name):
dic = {'Simple': Simple,
'Compounded': Compounded,
'Continuous': Continuous,
'SimpleThenCompounded': SimpleThenCompounded}
try:
cp = dic[name]
return cp
    except KeyError:
print('Compounding style %s is unknown' % name)
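# --- Editor's note: a minimal sketch, not part of the original module, showing how the
# styles above map to growth factors for a rate r over a time t (in periods).
import math
def compound_factor(r, t, style=Compounded):
    """Growth factor of one unit of currency under the given compounding style."""
    if style == Simple:
        return 1 + r * t
    if style == Compounded:
        return (1 + r) ** t
    if style == Continuous:
        return math.exp(r * t)
    if style == SimpleThenCompounded:
        # Simple up to the first period, compounded afterwards.
        return 1 + r * t if t <= 1 else (1 + r) ** t
    raise ValueError('Unknown compounding style: %s' % style)
# Example: compound_factor(0.05, 2, Compounded) == 1.1025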
|
the-stack_106_19526
|
import re
from itertools import permutations
def calculate(prev, next, op):
if op == '+': return prev + next
elif op == '-': return prev - next
else: return prev * next
def getNodes(string):
numbers = re.findall(r'(\d+)', string)
operators = re.findall(r'[+*-]', string)
return numbers, operators
def convertQueue(numbers, operators):
que = []
for i in range(len(numbers)):
que.append(numbers[i])
if i < len(operators):
que.append(operators[i])
return que
def solution(expression):
answer = 0
nums, ops = getNodes(expression)
# operators priority
operators = set(ops)
combos = permutations(operators, len(operators))
# make queue
que = convertQueue(nums, ops)
for combo in combos:
# search
tempOP = list(combo).copy()
tempQue = que.copy()
while tempOP:
front = tempOP.pop(0)
i = 0
while i + 1 < len(tempQue):
if tempQue[i + 1] == front:
prev = tempQue.pop(i)
curr = tempQue.pop(i)
next = tempQue.pop(i)
now = str(calculate(int(prev), int(next), curr))
tempQue.insert(i, now)
else:
i += 1
if len(tempQue) == 1:
res = abs(int(tempQue[0]))
if res > answer: answer = res
return answer
print(solution("200-300-500-600*40+500+500"))
# print(solution("100-200*300-500+20"))
# print(solution("50*6-3*2"))
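# --- Editor's note (not part of the original solution): quick self-checks for the two
# commented examples above, with expected values derived by tracing the algorithm.
assert solution("100-200*300-500+20") == 60420
assert solution("50*6-3*2") == 300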
|
the-stack_106_19528
|
import unittest
from glif import parsing
from glif import Glif
from glif import commands
from glif import utils
class TestBasicParsing(unittest.TestCase):
def test_parse_argument(self):
def test(s,k,v):
pr = parsing.parseCommandArg(s)
if not pr.success:
print(pr.logs)
self.assertTrue(pr.success)
expected = parsing.CommandArgument(key=k, value=v)
self.assertEqual(pr.value[0], expected)
test('-lang=Eng "test"', 'lang', 'Eng')
test('-lang=Eng', 'lang', 'Eng')
test('-val="test"', 'val', 'test')
test('-val=""', 'val', '')
test('-val="te st\\\\" " "', 'val', 'te st\\')
test('-val="test\\""', 'val', 'test"')
test('-level=3', 'level', '3')
test('-simple', 'simple', '')
test('-simple "test"', 'simple', '')
class TestCommandParsing(unittest.TestCase):
def parseBCtest(self, cmdStr: str, cmdname: str = '', args: list[tuple[str,str]] = [], mainargs: list[str] = [], success: bool = True, remainder: str = ''):
cmd = parsing.parseBasicCommand(cmdStr)
if success:
self.assertTrue(cmd.success)
assert cmd.value
self.assertEqual(cmd.value[1].strip(), remainder)
self.assertEqual(len(cmd.value[0].args), len(args))
for (a,b) in zip(args, cmd.value[0].args):
self.assertEqual(a[0], b.key)
self.assertEqual(a[1], b.value)
self.assertEqual(len(cmd.value[0].mainargs), len(mainargs))
for (x,y) in zip(mainargs, cmd.value[0].mainargs):
self.assertEqual(x, y)
else:
self.assertFalse(cmd.success)
def test_basic(self):
self.parseBCtest('parse -lang=Eng "hello world"', 'parse', [('lang', 'Eng')], ['hello world'])
self.parseBCtest('parse -lang=Eng -cat=S', 'parse', [('lang', 'Eng'), ('cat', 'S')])
self.parseBCtest('parse -lang=Eng "hello world" | l -lang=Ger', 'parse', [('lang', 'Eng')], ['hello world'], remainder='l -lang=Ger')
self.parseBCtest('parse "hello world"', 'parse', [], ['hello world'])
self.parseBCtest('parse', 'parse')
self.parseBCtest('l -lang=Eng hello', 'l', [('lang','Eng')], ['hello'])
self.parseBCtest('l -lang=Eng exclaim (hello world)', 'l', [('lang','Eng')], ['exclaim (hello world)'])
def test_incomplete(self):
self.parseBCtest('parse -lang=', success=False)
self.parseBCtest('parse -lang=Eng "hello world', success=False)
self.parseBCtest('parse -', success=False)
def test_args(self):
self.parseBCtest('c -arg="value"', 'c', [('arg', 'value')])
self.parseBCtest('c -bracket', 'c', [('bracket', '')])
self.parseBCtest('c -depth=3', 'c', [('depth', '3')])
self.parseBCtest('c -ignore-includes', 'c', [('ignore-includes', '')])
self.parseBCtest('c -file=input.txt', 'c', [('file', 'input.txt')])
def test_escape(self):
self.parseBCtest('parse "\\""', 'parse', [], ['"'])
self.parseBCtest('parse "\\"', success=False)
self.parseBCtest('parse "\\\\"', 'parse', [], ['\\'])
self.parseBCtest('parse "|" | l', 'parse', [], ['|'], remainder='l')
def test_space(self):
self.parseBCtest('parse -lang=Eng "hello world" | l -lang=Ger', 'parse', [('lang', 'Eng')], ['hello world'], remainder='l -lang=Ger')
self.parseBCtest('parse -lang=Eng "hello world"| l -lang=Ger', 'parse', [('lang', 'Eng')], ['hello world'], remainder='l -lang=Ger')
def test_mainargs(self):
self.parseBCtest('parse "hello" "world"', 'parse', [], ['hello', 'world'])
class TestFileIdentification(unittest.TestCase):
def idTest(self, content: str, expected: utils.Result[tuple[str,str]]):
r = parsing.identifyFile(content)
if expected.success:
self.assertTrue(r.success)
assert r.value
self.assertEqual(r.value[0:2], expected.value)
else:
self.assertFalse(r.success)
def test_basic(self):
self.idTest('abstract Grammar = { cat T; }', utils.Result(True, ('gf-abstract', 'Grammar')))
self.idTest('concrete GrammarEng of Grammar = { lin T = Str; }', utils.Result(True, ('gf-concrete', 'GrammarEng')))
self.idTest('theory DDT : ur:?LF = ❚', utils.Result(True, ('mmt-theory', 'DDT')))
self.idTest('view V : ?A -> ?B = ❚', utils.Result(True, ('mmt-view', 'V')))
self.idTest('parse "Hello world"', utils.Result(False))
self.idTest('-- The abstract syntax\nabstract Grammar = { cat T; }', utils.Result(True, ('gf-abstract', 'Grammar')))
self.idTest('// Example MMT theory ❚ theory DDT : ur:?LF = ❚', utils.Result(True, ('mmt-theory', 'DDT')))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19529
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe, unittest
class TestDynamicLinks(unittest.TestCase):
def setUp(self):
frappe.db.sql('delete from `tabEmail Unsubscribe`')
def test_delete_normal(self):
event = frappe.get_doc({
'doctype': 'Event',
'subject':'test-for-delete',
'starts_on': '2014-01-01',
'event_type': 'Public'
}).insert()
unsub = frappe.get_doc({
'doctype': 'Email Unsubscribe',
'email': '[email protected]',
'reference_doctype': event.doctype,
'reference_name': event.name
}).insert()
event.delete()
self.assertFalse(frappe.db.exists('Email Unsubscribe', unsub.name))
def test_delete_with_comment(self):
event = frappe.get_doc({
'doctype': 'Event',
'subject':'test-for-delete-1',
'starts_on': '2014-01-01',
'event_type': 'Public'
}).insert()
event.add_comment('Comment', 'test')
self.assertTrue(frappe.get_all('Comment',
filters={'reference_doctype':'Event', 'reference_name':event.name}))
event.delete()
self.assertFalse(frappe.get_all('Comment',
filters={'reference_doctype':'Event', 'reference_name':event.name}))
def test_custom_fields(self):
from frappe.utils.testutils import add_custom_field, clear_custom_fields
add_custom_field('Event', 'test_ref_doc', 'Link', 'DocType')
add_custom_field('Event', 'test_ref_name', 'Dynamic Link', 'test_ref_doc')
unsub = frappe.get_doc({
'doctype': 'Email Unsubscribe',
'email': '[email protected]',
'global_unsubscribe': 1
}).insert()
event = frappe.get_doc({
'doctype': 'Event',
'subject':'test-for-delete-2',
'starts_on': '2014-01-01',
'event_type': 'Public',
'test_ref_doc': unsub.doctype,
'test_ref_name': unsub.name
}).insert()
self.assertRaises(frappe.LinkExistsError, unsub.delete)
event.test_ref_doc = None
event.test_ref_name = None
event.save()
unsub.delete()
clear_custom_fields('Event')
|
the-stack_106_19530
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fake_enrichment documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import fake_enrichment
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fake_enrichment'
copyright = u"2018, Chris Churas"
author = u"Chris Churas"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = fake_enrichment.__version__
# The full version, including alpha/beta/rc tags.
release = fake_enrichment.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'fake_enrichment_restdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fake_enrichment.tex',
u'fake_enrichment Documentation',
u'Chris Churas', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fake_enrichment',
u'fake_enrichment Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fake_enrichment',
u'fake_enrichment Documentation',
author,
'fake_enrichment',
'One line description of project.',
'Miscellaneous'),
]
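# --- Editor's note, not part of the generated configuration: with this conf.py in the
# documentation source directory, the HTML docs are typically built with
#
#     sphinx-build -b html . _build/html
#
# or, if sphinx-quickstart also generated a Makefile, simply "make html".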
|
the-stack_106_19532
|
import unittest
from aiohttp.test_utils import unittest_run_loop
from ws.tests.testcase import MyHomeTestCase
class ApplianceTestCase(MyHomeTestCase):
@unittest_run_loop
async def test_get(self):
for collection in self.app.resources.appliances:
for appliance in self.app.resources.appliances[collection]:
request = await self.client.request(
"GET", "/appliance/{}".format(appliance.name.replace(" ", "%20"))
)
assert request.status == 200
text = await request.text()
assert appliance.name in text
@unittest_run_loop
async def test_post(self):
request = await self.client.request(
"POST",
"/appliance/simple%20light",
data={
"module": "home.appliance.light.event.forced",
"klass": "Event",
"value": "Off",
},
)
assert request.status == 200
text = await request.text()
assert "simple light" in text
assert "Off" in text
if __name__ == "__main__":
unittest.main()
|
the-stack_106_19533
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Albert finetune and evaluation script.
'''
import os
from src.Albert_Callback_class import albert_callback
from src.albert_for_finetune import AlbertFinetuneCell, AlbertCLS
from src.dataset import create_classification_dataset
from src.assessment_method import Accuracy, F1, MCC, Spearman_Correlation
from src.utils import make_directory, LossCallBack, LoadNewestCkpt, AlbertLearningRate
from src.model_utils.config import config as args_opt, optimizer_cfg, albert_net_cfg
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_id, get_device_num
from mindspore import context
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.nn.optim import AdamWeightDecay, Lamb, Momentum
from mindspore.train.model import Model
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank
from mindspore.common import set_seed
_cur_dir = os.getcwd()
def do_train(dataset=None, ds_eval=None, network=None, load_checkpoint_path="",
save_checkpoint_path="", epoch_num=1, args=None):
""" do train """
if load_checkpoint_path == "":
raise ValueError("Pretrain model missed, finetune task must load pretrain model!")
steps_per_epoch = dataset.get_dataset_size()
print("========steps_per_epoch: ========", steps_per_epoch)
# optimizer
if optimizer_cfg.optimizer == 'AdamWeightDecay':
lr_schedule = AlbertLearningRate(learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate,
end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate,
warmup_steps=optimizer_cfg.AdamWeightDecay.warmup_steps,
decay_steps=steps_per_epoch * epoch_num,
power=optimizer_cfg.AdamWeightDecay.power)
params = network.trainable_params()
decay_params = list(filter(optimizer_cfg.AdamWeightDecay.decay_filter, params))
other_params = list(filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x), params))
group_params = [{'params': decay_params, 'weight_decay': optimizer_cfg.AdamWeightDecay.weight_decay},
{'params': other_params, 'weight_decay': 0.0}]
optimizer = AdamWeightDecay(group_params, lr_schedule, eps=optimizer_cfg.AdamWeightDecay.eps)
elif optimizer_cfg.optimizer == 'Lamb':
lr_schedule = AlbertLearningRate(learning_rate=optimizer_cfg.Lamb.learning_rate,
end_learning_rate=optimizer_cfg.Lamb.end_learning_rate,
warmup_steps=optimizer_cfg.Lamb.warmup_steps,
decay_steps=steps_per_epoch * epoch_num,
power=optimizer_cfg.Lamb.power)
optimizer = Lamb(network.trainable_params(), learning_rate=lr_schedule)
elif optimizer_cfg.optimizer == 'Momentum':
optimizer = Momentum(network.trainable_params(), learning_rate=optimizer_cfg.Momentum.learning_rate,
momentum=optimizer_cfg.Momentum.momentum)
else:
raise Exception("Optimizer not supported. support: [AdamWeightDecay, Lamb, Momentum]")
# load checkpoint into network
ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=1)
ckpoint_cb = ModelCheckpoint(prefix="classifier",
directory=None if save_checkpoint_path == "" else save_checkpoint_path,
config=ckpt_config)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(network, param_dict)
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2 ** 32, scale_factor=2, scale_window=1000)
netwithgrads = AlbertFinetuneCell(network, optimizer=optimizer, scale_update_cell=update_cell)
model = Model(netwithgrads)
eval_callback = albert_callback(netwithgrads, args, steps_per_epoch, ds_eval, save_checkpoint_path)
model.train(epoch_num, dataset, callbacks=[TimeMonitor(dataset.get_dataset_size()), eval_callback,
LossCallBack(dataset.get_dataset_size()), ckpoint_cb])
def eval_result_print(assessment_method="accuracy", callback=None):
""" print eval result """
f1 = 0.0
if assessment_method == "accuracy":
print("acc_num {} , total_num {}, accuracy {:.6f}".format(callback.acc_num, callback.total_num,
callback.acc_num / callback.total_num))
elif assessment_method == "f1":
print("Precision {:.6f} ".format(callback.TP / (callback.TP + callback.FP)))
print("Recall {:.6f} ".format(callback.TP / (callback.TP + callback.FN)))
print("F1 {:.6f} ".format(2 * callback.TP / (2 * callback.TP + callback.FP + callback.FN)))
f1 = round(2 * callback.TP / (2 * callback.TP + callback.FP + callback.FN), 6)
elif assessment_method == "mcc":
print("MCC {:.6f} ".format(callback.cal()))
elif assessment_method == "spearman_correlation":
print("Spearman Correlation is {:.6f} ".format(callback.cal()[0]))
else:
raise ValueError("Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]")
return f1
def do_eval(dataset=None, network=None, num_class=2, assessment_method="accuracy", load_checkpoint_path=""):
""" do eval """
if load_checkpoint_path == "":
raise ValueError("Finetune model missed, evaluation task must load finetune model!")
net_for_pretraining = network(albert_net_cfg, False, num_class)
net_for_pretraining.set_train(False)
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net_for_pretraining, param_dict)
model = Model(net_for_pretraining)
if assessment_method == "accuracy":
callback = Accuracy()
elif assessment_method == "f1":
callback = F1(False, num_class)
elif assessment_method == "mcc":
callback = MCC()
elif assessment_method == "spearman_correlation":
callback = Spearman_Correlation()
else:
raise ValueError("Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]")
columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
for data in dataset.create_dict_iterator(num_epochs=1):
input_data = []
for i in columns_list:
input_data.append(data[i])
input_ids, input_mask, token_type_id, label_ids = input_data
logits = model.predict(input_ids, input_mask, token_type_id, label_ids)
callback.update(logits, label_ids)
print("==============================================================")
eval_result_print(assessment_method, callback)
print("==============================================================")
def modelarts_pre_process():
'''modelarts pre process function.'''
args_opt.device_id = get_device_id()
args_opt.device_num = get_device_num()
args_opt.load_pretrain_checkpoint_path = os.path.join(args_opt.load_path, args_opt.load_pretrain_checkpoint_path)
args_opt.load_finetune_checkpoint_path = os.path.join(args_opt.output_path, args_opt.load_finetune_checkpoint_path)
args_opt.save_finetune_checkpoint_path = os.path.join(args_opt.output_path, args_opt.save_finetune_checkpoint_path)
if args_opt.schema_file_path:
args_opt.schema_file_path = os.path.join(args_opt.data_path, args_opt.schema_file_path)
args_opt.train_data_file_path = os.path.join(args_opt.data_path, args_opt.train_data_file_path)
args_opt.eval_data_file_path = os.path.join(args_opt.data_path, args_opt.eval_data_file_path)
@moxing_wrapper(pre_process=modelarts_pre_process)
def run_classifier():
"""run classifier task"""
set_seed(45556)
epoch_num = args_opt.epoch_num
assessment_method = args_opt.assessment_method.lower()
load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path
save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path
load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path
if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "false":
raise ValueError("At least one of 'do_train' or 'do_eval' must be true")
if args_opt.do_train.lower() == "true" and args_opt.train_data_file_path == "":
raise ValueError("'train_data_file_path' must be set when do finetune task")
if args_opt.do_eval.lower() == "true" and args_opt.eval_data_file_path == "":
raise ValueError("'eval_data_file_path' must be set when do evaluation task")
if args_opt.device_target == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, device_id=args_opt.device_id)
else:
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target)
context.set_context(reserve_class_name_in_scope=False)
if args_opt.device_target == "GPU":
# Enable graph kernel
context.set_context(enable_graph_kernel=True, graph_kernel_flags="--enable_parallel_fusion")
if args_opt.distribute == 'true':
if args_opt.device_target == "Ascend":
# rank = args_opt.rank_id
device_num = args_opt.device_num
print("device_num: ", device_num)
context.reset_auto_parallel_context()
context.set_auto_parallel_context(device_num=device_num,
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
init()
rank = get_rank()
elif args_opt.device_target == "GPU":
init("nccl")
context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL,
gradients_mean=True)
else:
raise ValueError(args_opt.device_target)
save_ckpt_path = os.path.join(args_opt.save_finetune_checkpoint_path, 'ckpt_' + str(get_rank()) + '/')
else:
rank = 0
device_num = 1
save_ckpt_path = os.path.join(args_opt.save_finetune_checkpoint_path, 'ckpt_0/')
make_directory(save_ckpt_path)
netwithloss = AlbertCLS(albert_net_cfg, True, num_labels=args_opt.num_class, dropout_prob=0.1,
assessment_method=assessment_method)
if args_opt.do_train.lower() == "true":
print("create_classification_dataset")
ds = create_classification_dataset(batch_size=args_opt.train_batch_size, repeat_count=1,
assessment_method=assessment_method,
data_file_path=args_opt.train_data_file_path,
schema_file_path=args_opt.schema_file_path,
do_shuffle=(args_opt.train_data_shuffle.lower() == "true"),
rank_size=args_opt.device_num,
rank_id=rank)
ds_eval = create_classification_dataset(batch_size=args_opt.eval_batch_size, repeat_count=1,
assessment_method=assessment_method,
data_file_path=args_opt.eval_data_file_path,
schema_file_path=args_opt.schema_file_path,
do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"),
rank_size=1,
rank_id=0)
do_train(ds, ds_eval, netwithloss, load_pretrain_checkpoint_path, save_ckpt_path, epoch_num, args_opt)
if args_opt.do_eval.lower() == "true":
if save_finetune_checkpoint_path == "":
load_finetune_checkpoint_dir = _cur_dir
else:
load_finetune_checkpoint_dir = make_directory(save_ckpt_path)
load_finetune_checkpoint_path = LoadNewestCkpt(load_finetune_checkpoint_dir,
ds.get_dataset_size(), epoch_num, "classifier")
if args_opt.do_eval.lower() == "true":
ds = create_classification_dataset(batch_size=args_opt.eval_batch_size, repeat_count=1,
assessment_method=assessment_method,
data_file_path=args_opt.eval_data_file_path,
schema_file_path=args_opt.schema_file_path,
do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"))
do_eval(ds, AlbertCLS, args_opt.num_class, assessment_method, load_finetune_checkpoint_path)
if __name__ == "__main__":
run_classifier()
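# --- Editor's note, not part of the script: the flags used above (do_train, do_eval,
# train_data_file_path, load_pretrain_checkpoint_path, ...) are resolved through
# src.model_utils.config; a hypothetical invocation following that pattern would be
#
#     python run_classifier.py --do_train=true --do_eval=true \
#         --train_data_file_path=train.mindrecord --eval_data_file_path=eval.mindrecord \
#         --load_pretrain_checkpoint_path=albert_base.ckpt
#
# (exact flag handling depends on the accompanying yaml config, which is not shown here).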
|
the-stack_106_19534
|
# Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
Version tests
"""
import json
import requests
from unittest.mock import patch
from quilt_server.core import hash_contents, GroupNode, RootNode
from .utils import QuiltTestCase
class VersionTestCase(QuiltTestCase):
"""
Test version endpoints.
"""
def setUp(self):
super(VersionTestCase, self).setUp()
self.user = "test_user"
self.pkg = "pkg"
self.contents_list = [
RootNode(dict(
foo=GroupNode(dict())
)),
RootNode(dict(
bar=GroupNode(dict())
)),
RootNode(dict(
baz=GroupNode(dict())
))
]
self.hashes = [hash_contents(contents) for contents in self.contents_list]
# Upload three package instances.
for contents in self.contents_list:
self.put_package(self.user, self.pkg, contents)
def _add_version(self, version, pkghash):
return self.app.put(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version=version
),
data=json.dumps(dict(
hash=pkghash
)),
content_type='application/json',
headers={
'Authorization': self.user
}
)
def testGetVersion(self):
resp = self._add_version('1', self.hashes[0])
assert resp.status_code == requests.codes.ok
# Access the same version.
resp = self.app.get(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version='1'
),
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.ok
data = json.loads(resp.data.decode('utf8'))
assert data['hash'] == self.hashes[0]
# Access the same version, but with whitespace.
resp = self.app.get(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version=' 1\t'
),
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.ok
data = json.loads(resp.data.decode('utf8'))
assert data['hash'] == self.hashes[0]
assert data['created_by'] == data['updated_by'] == self.user
assert data['created_at'] == data['updated_at']
def testListVersions(self):
# Add a few versions in a random order, with random whitespace.
resp = self._add_version('2.0.1+foo123', self.hashes[2])
assert resp.status_code == requests.codes.ok
resp = self._add_version(' 2.0 ', self.hashes[1])
assert resp.status_code == requests.codes.ok
resp = self._add_version('1.0', self.hashes[0])
assert resp.status_code == requests.codes.ok
resp = self._add_version('2.0pre1', self.hashes[1])
assert resp.status_code == requests.codes.ok
# List versions.
resp = self.app.get(
'/api/version/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.ok
# Verify that the response is sorted by version,
# but preserves the original formatting - whitespace, etc.
data = json.loads(resp.data.decode('utf8'))
versions = data['versions']
assert versions == [
dict(
version='1.0',
hash=self.hashes[0]
),
dict(
version='2.0pre1',
hash=self.hashes[1]
),
dict(
version=' 2.0 ',
hash=self.hashes[1]
),
dict(
version='2.0.1+foo123',
hash=self.hashes[2]
)
]
def testInvalidVersion(self):
resp = self._add_version('foo', self.hashes[0])
assert resp.status_code == requests.codes.bad_request
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
resp = self._add_version('1x', self.hashes[0])
assert resp.status_code == requests.codes.bad_request
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
resp = self._add_version('1. 0', self.hashes[0])
assert resp.status_code == requests.codes.bad_request
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
def testInvalidHash(self):
resp = self._add_version('1.0', '000')
assert resp.status_code == requests.codes.not_found
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
def testDuplicateVersion(self):
resp = self._add_version('1.0', self.hashes[0])
assert resp.status_code == requests.codes.ok
# Same hash
resp = self._add_version('1.0 ', self.hashes[0])
assert resp.status_code == requests.codes.conflict
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
# Different hash
resp = self._add_version(' 1.0 ', self.hashes[1])
assert resp.status_code == requests.codes.conflict
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
def testDelete(self):
resp = self._add_version('1.0', self.hashes[0])
assert resp.status_code == requests.codes.ok
resp = self.app.delete(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version='1.0'
),
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.method_not_allowed
def testAccess(self):
resp = self._add_version('1.0', self.hashes[0])
assert resp.status_code == requests.codes.ok
sharewith = "share_with"
resp = self._share_package(self.user, self.pkg, sharewith)
assert resp.status_code == requests.codes.ok
# Can view
resp = self.app.get(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version='1.0'
),
headers={
'Authorization': sharewith
}
)
assert resp.status_code == requests.codes.ok
# Can't modify
resp = self.app.put(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version='1.0'
),
data=json.dumps(dict(
hash=self.hashes[1]
)),
content_type='application/json',
headers={
'Authorization': sharewith
}
)
assert resp.status_code == requests.codes.forbidden
data = json.loads(resp.data.decode('utf8'))
assert 'message' in data
|
the-stack_106_19536
|
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.187
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.request import proxy_bypass_environment
import urllib3
import ipaddress
from ory_client.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if configuration.socket_options is not None:
addition_pool_args['socket_options'] = configuration.socket_options
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy and not should_bypass_proxies(configuration.host, no_proxy=configuration.no_proxy or ''):
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=configuration.ssl_ca_cert,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
# Only set a default Content-Type for POST, PUT, PATCH and OPTIONS requests
if (method != 'DELETE') and ('Content-Type' not in headers):
headers['Content-Type'] = 'application/json'
if query_params:
url += '?' + urlencode(query_params)
if ('Content-Type' not in headers) or (re.search('json', headers['Content-Type'], re.IGNORECASE)):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type, which is generated by urllib3, will be
                    # overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
if r.status == 401:
raise UnauthorizedException(http_resp=r)
if r.status == 403:
raise ForbiddenException(http_resp=r)
if r.status == 404:
raise NotFoundException(http_resp=r)
if 500 <= r.status <= 599:
raise ServiceException(http_resp=r)
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
# end of class RESTClientObject
def is_ipv4(target):
""" Test if IPv4 address or not
"""
try:
chk = ipaddress.IPv4Address(target)
return True
except ipaddress.AddressValueError:
return False
def in_ipv4net(target, net):
""" Test if target belongs to given IPv4 network
"""
try:
nw = ipaddress.IPv4Network(net)
ip = ipaddress.IPv4Address(target)
if ip in nw:
return True
return False
except ipaddress.AddressValueError:
return False
except ipaddress.NetmaskValueError:
return False
def should_bypass_proxies(url, no_proxy=None):
""" Yet another requests.should_bypass_proxies
Test if proxies should not be used for a particular url.
"""
parsed = urlparse(url)
# special cases
if parsed.hostname in [None, '']:
return True
# special cases
if no_proxy in [None , '']:
return False
if no_proxy == '*':
return True
    no_proxy = no_proxy.lower().replace(' ', '')
entries = (
host for host in no_proxy.split(',') if host
)
if is_ipv4(parsed.hostname):
for item in entries:
if in_ipv4net(parsed.hostname, item):
return True
return proxy_bypass_environment(parsed.hostname, {'no': no_proxy} )
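# --- Editor's note: a small self-check sketch, not part of the generated client,
# exercising the proxy-bypass helpers defined above.
if __name__ == '__main__':
    # An IPv4 host matching a CIDR entry in no_proxy bypasses the proxy.
    assert should_bypass_proxies('http://10.0.0.5/api', no_proxy='10.0.0.0/8')
    # '*' disables the proxy for every host.
    assert should_bypass_proxies('https://example.com', no_proxy='*')
    # A non-matching hostname falls through to proxy_bypass_environment and keeps the proxy.
    assert not should_bypass_proxies('https://example.com', no_proxy='internal.local')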
|
the-stack_106_19538
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER = 2
class Test_MacScan(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1, 3):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_channel(12)
self.nodes[LEADER].set_network_name('OpenThread')
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_channel(12)
self.nodes[ROUTER].set_network_name('OpenThread')
def tearDown(self):
for n in list(self.nodes.values()):
n.stop()
n.destroy()
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
results = self.nodes[LEADER].scan()
self.assertEqual(len(results), 16)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19539
|
import os
import sys
import random
import math
import time
class BadInputError(Exception):
pass
class Player():
def __init__(self, name):
self.id = None
self.name = name
self.type = 'Human'
self.hand = Hand()
self.legalCards = []
self.wildCards = []
self.valueChangeCards = []
self.zeroCards = []
self.canSkip = False
self.canReverse = False
self.canDrawTwo = False
self.canDrawFour = False
self.canValueChange = False
self.drew = False
self.scrollMax = 0
self.points = 0
self.forceDraw = 0
def addCard(self, card):
self.drew = True
if self.forceDraw > 0:
self.forceDraw -= 1
self.drew = False
self.hand.addCard(card)
def beginTurn(self):
self.drew = False
def didDraw(self):
return self.drew
def getLegalCards(self, color, value, zeroChange=False):
self.canSkip = False
self.canReverse = False
self.canDrawTwo = False
self.canDrawFour = False
self.canValueChange = False
self.canZeroChange = False
self.legalCards = []
self.wildCards = []
self.valueChangeCards = []
self.zeroCards = []
plusFours = []
for card in self.hand:
if card.isWild():
if card.getValue() == '+4':
plusFours.append(card)
else:
self.wildCards.append(card)
elif zeroChange and card.isZero():
self.canZero = True
self.zeroCards.append(card)
elif card.getColor() == color or card.getValue() == value:
if card.getColor() != color:
self.canValueChange = True
self.valueChangeCards.append(card)
if card.getValue() == "+2":
self.canDrawTwo = True
elif card.getValue() == 'R':
self.canReverse = True
elif card.getValue() == 'X':
self.canSkip = True
self.legalCards.append(card)
if len(self.legalCards) == 0 and len(plusFours) > 0:
self.canDrawFour = True
self.wildCards += plusFours
def getValidCards(self):
return self.legalCards
def getAllValidCards(self):
return self.legalCards + self.wildCards + self.zeroCards
def hasLegalCard(self):
return len(self.legalCards) > 0
def addPoints(self, amount):
if (self.points + amount) <= 999999999999999999999:
self.points += amount
def removeCard(self, index):
return self.hand.removeCard(index)
def assignID(self, identity):
self.id = identity
def getName(self):
return self.name
def getID(self):
return self.id
def getPoints(self):
return self.points
def getType(self):
return self.type
def getCardNum(self):
return len(self.hand)
def getHand(self, scrollNum=0, hide=False):
return self.hand.show(scrollNum, hide)
def getForceDraws(self):
return self.forceDraw
def addForceDraw(self, num):
self.forceDraw += num
def decreaseForceDraw(self):
self.forceDraw -= 1
def removeForceDraw(self):
self.forceDraw = 0
def checkCard(self, index):
return self.hand.getCard(int(index))
def discardHand(self):
self.hand.discard()
def __str__(self):
return self.name
def __repr__(self):
return '({},{})'.format(self.name, self.points)
class Hand():
    ''''deck' (Deck) : deck to draw the starting cards from (optional)
    'numberOfCards' (int) : number of cards to draw from that deck'''
def __init__(self, deck=None,numberOfCards=0):
self.hand = []
if deck != None:
self.draw(deck,numberOfCards)
def __iter__(self):
return iter(self.hand)
def __len__(self):
return len(self.hand)
def __getitem__(self, item):
try:
return self.hand[item]
except:
return ''
def addCard(self, card):
self.hand.append(card)
def removeCard(self, index):
index = int(index)
if (0 <= index < len(self)):
return self.hand.pop(index)
def discard(self):
self.hand = []
def show(self, scrollNum=0, hide=False):
if scrollNum == -1:
scrollNum = 0
output = ''
num = 0
header, footer, upper, lower = '', '', '', ''
header += ('\033[97m\u2666--\u2666\033[0m ')
upper += ('\033[97m|<-|\033[0m ')
lower += ('\033[97m|<-|\033[0m ')
footer += ('\033[97m\u2666--\u2666\033[0m ')
for i in range(10):
indexNum = i+(10*scrollNum)
if indexNum < len(self):
header += (self[indexNum].getRow(0,hide)+' ')
upper += (self[indexNum].getRow(1,hide)+' ')
lower += (self[indexNum].getRow(2,hide)+' ')
footer += (self[indexNum].getRow(3,hide)+' ')
num += 1
        for _ in range(10-num):
header += (' ')
footer += (' ')
upper += (' ')
lower += (' ')
header += ('\033[97m\u2666--\u2666\033[0m ')
upper += ('\033[97m|->|\033[0m ')
lower += ('\033[97m|->|\033[0m ')
footer += ('\033[97m\u2666--\u2666\033[0m ')
output += (' '+header+'\n '+upper+'\n '+lower+'\n '+footer+'\n\033[97m|-(<)--')
for k in range(num):
output += '({})'.format(k)
output += '--'
        for _ in range(10-num):
output += '-----'
output += '(>)--|\033[0m\n'
return output
def getCard(self, index):
return self.hand[index]
def indexCard(self, card):
return self.hand.index(card)
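    def draw(self, deck, numberOfCards):
        # Editor's note: __init__ above calls self.draw(), but no such method appears in
        # this excerpt; this is a minimal sketch of the presumably intended behaviour of
        # drawing `numberOfCards` cards from `deck` into the hand.
        for _ in range(numberOfCards):
            self.addCard(deck.draw())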
class GameSettings():
playerIdentities = ('play1','play2','play3','play4')
computerNames = ('Watson','SkyNet','Hal','Metal Gear')
def __init__(self):
self.playerStaging = [] # Where Player Objs Are Stored Before Game Starts
self.players = {} # ID : Player Obj
self.numPlayers = 0
self.useColor = True
self.displayEffects = True
self.hideComputerHands = True
self.zeroChange = False
self.computerSimulation = False
self.mainMenuError = ''
self.computerSpeed = 'normal'
def canAddPlayer(self):
return (self.numPlayers < 4)
def canRemovePlayer(self):
return (self.numPlayers > 0)
def canBegin(self):
return (self.numPlayers > 1)
def addPlayer(self, player):
self.playerStaging.append(player)
self.numPlayers += 1
def removePlayer(self, number):
number -= 1
del self.playerStaging[number]
self.numPlayers -= 1
def clearStaging(self):
self.numPlayers = 0
self.playerStaging = []
def finalizePlayers(self):
self.players.clear()
identity = 0
for player in self.playerStaging:
playerID = self.playerIdentities[identity]
player.assignID(playerID)
self.players[playerID] = player
identity += 1
def getPlayerNum(self):
return self.numPlayers
def getComputerName(self):
complete = False
index = self.numPlayers
while not complete:
name = self.computerNames[index]
complete = True
for player in self.playerStaging:
if player.getName() == name:
index += 1
if index >= len(self.computerNames):
index = 0
complete = False
return self.computerNames[index]
def getRandomIdentity(self):
'''For Getting a Random Player for First Turn.'''
        return random.choice(list(self.players.keys()))
def compileMainMenuElements(self):
def getBlankSpace(word, total):
return " "*(total-len(word))
def getPlayerBox(playerNum, rowNum):
if rowNum == 1:
name = self.playerStaging[playerNum-1].getName()
return '{}{}'.format(name, getBlankSpace(name, 29))
elif rowNum == 2:
points = self.playerStaging[playerNum-1].getPoints()
return 'Points: {}{}'.format(points, getBlankSpace(str(points), 21))
self.mainMenuElements= {'play1row1':'No Player ','play1row2':' ',
'play2row1':'No Player ',
'play2row2':' ',
'play3row1':'No Player ','play3row2':' ',
'play4row1':'No Player ',
'play4row2':' ',
'play1box':'\033[90m','play2box':'\033[90m','play3box':'\033[90m','play4box':'\033[90m',
'beginBox':'\033[90m','addBox':'\033[97m','removeBox':'\033[90m'
}
playerBoxKey = 'play{}box'
playerRowKey = 'play{}row{}'
i = 1
        for _ in self.playerStaging:
colorCode = ['\033[91m','\033[94m','\033[92m','\033[93m']
key = playerBoxKey.format(i)
self.mainMenuElements[key] = colorCode[i-1]
self.mainMenuElements[playerRowKey.format(i,1)] = getPlayerBox(i, 1)
self.mainMenuElements[playerRowKey.format(i,2)] = getPlayerBox(i, 2)
i+=1
if self.canBegin():
self.mainMenuElements['beginBox'] = '\033[95m'
if not self.canAddPlayer():
self.mainMenuElements['addBox'] = '\033[90m'
if self.canRemovePlayer():
self.mainMenuElements['removeBox'] = '\033[97m'
def changeComputerSpeed(self):
if self.computerSpeed == 'slow':
self.computerSpeed = 'normal'
elif self.computerSpeed == 'normal':
self.computerSpeed = 'fast'
elif self.computerSpeed == 'fast':
self.computerSpeed = 'slow'
def getMainMenuElements(self):
return self.mainMenuElements
class Deck():
    ''''populate' (bool) : populate the deck with the standard 108 cards and shuffle it.'''
colors = ('red','yellow','green','blue')
values = ('0','1','2','3','4','5','6','7','8','9','X','R','+2')
def __init__(self, populate):
'''Initializes proper deck of 108 Uno Cards.'''
self.deck = []
if populate:
self.populate(True)
def __getitem__(self, index):
return self.deck[index]
def populate(self, shuffle=True):
for color in self.colors:
for value in self.values:
self.deck.append(Card(color, value))
if value != '0':
self.deck.append(Card(color, value))
        for _ in range(4):
self.deck.append(Card('wild', '+4'))
self.deck.append(Card('wild', 'W'))
if shuffle:
self.shuffle()
def __iter__(self):
return iter(self.deck)
def __len__(self):
return len(self.deck)
def draw(self):
return self.deck.pop()
def place(self, card):
return self.deck.append(card)
def insert(self, card):
self.deck.insert(0, card)
def shuffle(self):
random.shuffle(self.deck)
class ComputerPlayer(Player):
def __init__(self, name):
super().__init__(name)
self.type = 'Computer'
self.begun = False
self.colorsInHand = {'red':0, 'blue':0, 'green':0, 'yellow':0, 'wild':0}
self.colorsOutHand = {}
self.currentColor = ""
def addCard(self, card):
Player.addCard(self, card)
color = card.getColor()
self.colorsInHand[color] += 1
def indexCard(self, cardColor, cardValue):
for card in self.hand:
if card.getValue() == cardValue:
if cardValue in ('+4', 'W'):
return self.hand.indexCard(card)
else:
if card.getColor() == cardColor:
return self.hand.indexCard(card)
raise ValueError("Card Cannot Be Found")
def think(self, match):
card = None
self.currentColor = match.currentColor
currentValue = match.currentValue
zeroChangeRule = match.zeroChange
twoPlayers = False
previousTurnID = match.getNextTurn(True)
nextTurnID = match.getNextTurn(False)
previousPlayer = match.getPlayer(previousTurnID)
#nextPlayer = match.getPlayer(nextTurnID)
if previousTurnID == nextTurnID:
twoPlayers = True
if self.canSkip == False and self.canReverse == True:
self.canSkip = True
self.canReverse = False
self.getLegalCards(self.currentColor, currentValue, zeroChangeRule)
### DRAW CASE ###
if len(self.legalCards) == 0 and len(self.wildCards) == 0:
return "d"
else:
### NO LEGAL CARD, USE WILD CARD ###
if len(self.legalCards) == 0:
if zeroChangeRule and self.canZeroChange:
bestZeroColor = self.getBestColor(self.zeroCards)
card = self.getCardByColor(self.zeroCards, bestZeroColor)
else:
if self.canDrawFour:
card = self.getCardByValue(self.wildCards, "+4")
                        #print(card)
else:
card = random.choice(self.wildCards)
else:
### HAS LEGAL CARD ###
if twoPlayers and self.canSkip: #Always play a skip card in a two player game
#print("Shed Skip Strategy")
card = self.getCardByValue(self.legalCards,"R", "X")
if self.canReverse and previousPlayer.didDraw():
#print("Reverse Strategy")
reverseCards = self.getAllCardsByValue(self.legalCards, "R")
for reverseCard in reverseCards:
if reverseCard.getColor() == self.currentColor:
card = reverseCard
if self.canValueChange:
# Computer Can Value Change, However, Should it?
# Computer Checks to See if Value Change Color is Better Than Current
currentColorNum = self.colorsInHand[self.currentColor]
bestValueChangeColor = self.getBestColor(self.valueChangeCards)
if self.colorsInHand[bestValueChangeColor] > currentColorNum or len(self.valueChangeCards) == len(self.legalCards):
card = self.getCardByColor(self.valueChangeCards, bestValueChangeColor)
if card == None:
#print("Random Strategy")
card = random.choice(list(set(self.legalCards) - set(self.valueChangeCards)))
color = card.getColor()
self.colorsInHand[color] -= 1
return str(self.indexCard(card.getColor(), card.getValue()))
def getWildColor(self):
maxKey = max(self.colorsInHand, key=self.colorsInHand.get)
if maxKey == 'wild':
return random.choice(('r','g','b','y'))
else:
return maxKey
def getCardByValue(self, cardList, *values):
for card in cardList:
if card.getValue() in values:
return card
def getAllCardsByValue(self, cardList, *values):
cards = []
for card in cardList:
if card.getValue() in values:
cards.append(card)
return cards
def getCardByColor(self, cardList, *colors):
for card in cardList:
if card.getColor() in colors:
return card
def getBestColor(self, cardList):
bestColor = None
bestColorNum = 0
for card in cardList:
color = card.getColor()
if self.colorsInHand[color] > bestColorNum:
bestColor = color
bestColorNum = self.colorsInHand[color]
return bestColor
class Card():
'''
    'color' (string) : Card's Color (red, yellow, green, blue, wild)
    'value' (string) : Card's Value (0-9, R, X, W, +2, +4)
'''
colors = {
'red' : '\033[91m',
'green' : '\033[92m',
'yellow' : '\033[93m',
'blue' : '\033[94m',
'purple' : '\033[95m',
'cyan' : '\033[96m',
'white' : '\033[97m',
'wild' : '',
'dwild' : '',
'dred' : '\033[31m',
'dgreen' : '\033[32m',
'dyellow' : '\033[33m',
'dblue' : '\033[34m',
'dpurple' : '\033[35m',
'dcyan' : '\033[36m',
'dwhite' : '\033[37m',
}
idMap = {
'red':'R','blue':'B','green':'G','yellow':'Y','wild':'W',
'0':'0','1':'1','2':'2','3':'3','4':'4','5':'5','6':'6','7':'7','8':'8','9':'9',
'+2':'+','R':'R','W':'W','+4':'$','X':'X'
}
bigNums = {
"0" : [" .d888b. ","d88P Y88b","888 888","888 888","888 888","888 888","d88P Y88b"," \"Y888P\" "],
"1" : [" d888 "," d8888 "," 888 "," 888 "," 888 "," 888 "," 888 "," 8888888 "],
"2" : [".d8888b. ","d88P Y88","d8 888"," .d88P",".od888P\" ","d88P\" ","888\" ","888888888"],
"3" : [" .d8888b.","d88P Y88"," .d88"," 8888\" "," \"Y8b","888 88","Y88b d88"," \"Y8888P\""],
"4" : [" d88b "," d8P88 "," d8 88 "," d8 88 ","d8 88 ","888888888"," 88 "," 88 "],
"5" : ["888888888","888 ","888 ","8888888b "," \"Y88b "," 888","Y88b d88P","\"Y8888P\" "],
"6" : [" .d888b. ","d88P Y88b","888 ","888d888b ","888P \"Y8b","888 888","Y88b d88b"," \"Y888P\" "],
"7" : ["888888888"," d8P"," d8P "," d8P "," 8888888 "," d8P "," d8P ","d8P "],
"8" : [" .d888b. ","d8P Y8b","Y8b. d8P"," \"Y8888\" "," .dP\"Yb. ","888 888","Y88b d88P"," \"Y888P\" "],
"9" : [" .d888b. ","d8P Y8b","88 88","Y8b. d88"," \"Y88P888"," 888","Y88b d88P"," \"Y888P\" "],
"X" : ["Y8b d8P"," Y8b d8P "," Y8o8P "," Y8P "," d8b "," d888b "," d8P Y8b ","d8P Y8b"],
"W" : ["88 88","88 88","88 o 88","88 d8b 88","88d888b88","88P Y88","8P Y8","P Y"],
"+2" : [" db "," 88 ","C8888D "," 88 8888"," VP 8"," 8888"," 8 "," 8888"],
"+4" : [" db "," 88 ","C8888D "," 88 d "," VP d8 "," d 8 "," d8888"," 8 "],
"R9" : [" d88P "," d88P "," d88P "," d88P "," Y88b "," Y88b "," Y88b "," Y88b "],
"R8" : [" d88P "," d88P "," d88P ","d88P ","Y88b "," Y88b "," Y88b "," Y88b "],
"R7" : [" d88P Y"," d88P ","d88P ","88P ","88b ","Y88b "," Y88b "," Y88b d"],
"R6" : [" d88P Y8","d88P Y","88P ","8P ","8b ","88b ","Y88b d"," Y88b d8"],
"R5" : ["d88P Y88","88P Y8","8P Y","P ","b ","8b d","88b d8","Y88b d88"],
"R4" : ["88P Y88b","8P Y88","P Y8"," Y"," d","b d8","8b d88","88b d88P"],
"R3" : ["8P Y88b ","P Y88b"," Y88"," Y8"," d8"," d88","b d88P","8b d88P "],
"R2" : ["P Y88b "," Y88b "," Y88b"," Y88"," d88"," d88P"," d88P ","b d88P "],
"R1" : [" Y88b "," Y88b "," Y88b "," Y88b"," d88P"," d88P "," d88P "," d88P "],
"R0" : [" Y88b "," Y88b "," Y88b "," Y88b "," d88P "," d88P "," d88P "," d88P "],
}
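# The "R0"-"R9" entries above are animation frames for the reverse card;
# getBigNum() selects one via its reverseSeed argument so the pile art appears
# to spin while eventReverse() counts the seed from 0 to 9.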
def __init__(self, color, value):
'''Initializes Uno Card w/ Color and Value.'''
self.wild = False #Is wild card?
self.zero = False
self.cardID = '{}{}'.format(self.idMap[color],self.idMap[value])
self.setColor(color)
self.setValue(value)
self.setPoints(value)
#############################################
### -\/- Retrieve Card Information -\/- ###
def __repr__(self):
return "{},{}".format(self.color, self.value)
def getBigNum(self, reverse, reverseSeed=0):
'''Returns list of strings to draw card's value on the pile.'''
bigNums = []
colorCode = self.colorCode
colorCodeDark = self.colorCodeDark
value = self.value
if value == 'R':
if not reverse:
value += str(reverseSeed)
else:
value += str(9-reverseSeed)
for mid in self.bigNums[value]:
bigNums += ['{}| |{}'.format(colorCode,colorCodeDark)+mid+'{}| |\033[0m\t'.format(colorCode)]
return bigNums
def getColor(self):
'''Returns card's color.'''
return self.color
def getColorCode(self):
'''Returns card's color code.'''
return self.colorCode
def getValue(self):
'''Returns card's value.'''
return self.value
def getPoints(self):
'''Returns card's point value.'''
return self.points
def getRow(self,rowNum,hide=False):
value = self.value
displaySpace = self.displaySpace
if hide:
colorCode = '\033[97m'
value = '?'
displaySpace = ' '
else:
colorCode = self.colorCode
if self.isWild():
if rowNum == 0:
colorCode = '\033[91m'
elif rowNum == 1:
colorCode = '\033[93m'
elif rowNum == 2:
colorCode = '\033[92m'
elif rowNum == 3:
colorCode = '\033[94m'
if rowNum == 0:
return '{}\u2666--\u2666\033[0m'.format(colorCode)
elif rowNum == 1:
return '{}|{}{}|\033[0m'.format(colorCode, displaySpace, value)
elif rowNum == 2:
if hide:
return '{}|? |\033[0m'.format(colorCode)
else:
return '{}| |\033[0m'.format(colorCode)
elif rowNum == 3:
return '{}\u2666--\u2666\033[0m'.format(colorCode)
#############################################
### -\/- Set Card Information -\/- ###
def setColor(self, color):
'''Sets Card's color and escape code.'''
if color == 'blue':
self.color = 'blue'
self.colorCode = self.colors['blue']
self.colorCodeDark = self.colors['dblue']
elif color == 'red':
self.color = 'red'
self.colorCode = self.colors['red']
self.colorCodeDark = self.colors['dred']
elif color == 'yellow':
self.color = 'yellow'
self.colorCode = self.colors['yellow']
self.colorCodeDark = self.colors['dyellow']
elif color == 'green':
self.color = 'green'
self.colorCode = self.colors['green']
self.colorCodeDark = self.colors['dgreen']
elif color == 'wild': #No color modification
self.wild = True
self.color = 'wild'
self.colorCodeDark = self.colors['dwild']
self.colorCode = self.colors['wild']
def setValue(self, value):
if value in ('0','1','2','3','4','5','6','7','8','9','X','R','+2','+4','W'):
self.value = value
self.displaySpace = ' '
if len(value) == 2:
self.displaySpace = ''
if value == '0':
self.zero = True
def setPoints(self, value):
if value in ('0','1','2','3','4','5','6','7','8','9'):
self.points = int(value)
elif value in ("W", "+4"):
self.points = 50
else:
self.points = 20
#############################################
### -\/- Wild Card Methods -\/- ###
def changeColor(self, color):
'''Changes Card's Color, Intended for Wild Cards.'''
self.setColor(color)
def isWild(self):
'''Returns if card is a wild card.'''
return self.wild
def isZero(self):
return self.zero
class Match():
elementsInit = {
### Names (final) ###
'P1Name':' ', 'P2Name':' ', 'P3Name':' ', 'P4Name':' ',
### Card Values ###
'P1Cards':' ', 'P2Cards':' ', 'P3Cards':' ', 'P4Cards':' ',
### Turn Colors / Hand###
'P1Turn':'', 'P2Turn':'', 'P3Turn':'', 'P4Turn':'',
'HName':'\t\t', 'HVisual':'' ,'Hand':'',
### Deck ###
'DNum':'', 'Deck':['','','','','','','','',''],
'PostDNum':'',
### Pile ###
'uHeader':'\t\t\t\t', 'uMiddle':' ', 'uLower':' ',
'oHeader':'\t\t\t', 'oMiddle':['\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t','\t\t\t'],
### Messages ###
'Console':'', 'Error':''
}
speeds = {'slow':2,'normal':1,'fast':0}
def __init__(self, gs):
### Decks ###
self.deck = Deck(True)
self.pile = Deck(False)
### Player Information ###
self.players = gs.players
self.turnList = []
self.handTitles = {'play1':'','play2':'','play3':'','play4':''}
### Carry Information ###
self.displayEffects = gs.displayEffects
self.hideComputerHands = gs.hideComputerHands
self.zeroChange = gs.zeroChange
self.computerSpeed = self.speeds[gs.computerSpeed]
self.simulation = gs.computerSimulation
### Data ###
self.handPosition = 0 # For hand displays
self.drawAmount = 0 # Used for force draws
self.passes = 0 # Keep track of consecutive passes for emergency color change
self.passMax = 0 # Max passes before color change
self.turn = '' # Current turn
self.event = '' # Wild, Reverse, Skip, etc
self.wildColorChange = '' # Specifies color to change wild card to
self.currentColor = '' # Current color
self.currentValue = '' # Current value
self.winnerID = '' # ID of Player who Won
self.reverse = False # Is turn order reversed
self.turnComplete = False # Is turn complete
self.matchComplete = False # Is the Game over?
self.matchAbort = False # Did the match conclude without a winner?
self.forcedWild = False # Force change wild
### Initialize Names / Cards / Deck (Assuming New Game) ###
self.elements = dict(self.elementsInit)
keyStringName = 'P{}Name'
keyStringCards = 'P{}Cards'
for i in self.players:
self.elements[keyStringName.format(i[-1])] = self.players[i].getName()+(' '*(11-len(self.players[i].getName())))
self.elements[keyStringCards.format(i[-1])] = ' '+(' '*(3-len(str(self.players[i].getCardNum()))))+str(self.players[i].getCardNum())+' Cards'
self.elements['DNum'] = len(self.deck)
if len(str(len(self.deck))) < 2:
self.elements['PostDNum'] = '\t'
j = 8
for i in range(int(math.ceil(len(self.deck)/12))):
self.elements['Deck'][j] = '='
j -= 1
for key in GameSettings.playerIdentities:
try:
self.buildHandString(key)
self.turnList += [key]
except KeyError:
pass
self.passMax = len(self.turnList)
def clearShell(self):
os.system('cls' if os.name == 'nt' else 'clear')
def begin(self):
self.elements['Console'] = 'Beginning Game, Press Enter.'
print(self.drawScreen())
self.enterBreak()
self.eventDealCards()
self.turn = random.choice(self.turnList)
self.elements['Console'] = 'First turn will be {}. Press Enter.'.format(self.players[self.turn].getName())
print(self.drawScreen(True))
self.enterBreak()
self.placeCard()
self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m'
if self.event == 'wild':
self.eventWildCard()
elif self.event == 'reverse':
self.eventReverse()
def end(self, gs):
if not self.matchAbort:
points = 0
self.elements['P{}Turn'.format(self.turn[-1])] = ''
self.elements['Console'] = '{} Wins! Press Enter to Begin Point Tally'.format(self.players[self.winnerID].getName())
print(self.drawScreen())
self.enterBreak()
for identity in self.turnList:
if identity != self.winnerID:
self.turn = identity
self.elements['HName'] = self.handTitles[self.turn]
self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m'
while self.players[identity].getCardNum() > 0:
card = self.players[identity].removeCard(0)
points += card.getPoints()
self.elements['Console'] = '{} Won {} Points!'.format(self.players[self.winnerID].getName(),points)
keyStringCards = 'P{}Cards'
self.elements[keyStringCards.format(identity[-1])] = ' '+(' '*(3-len(str(self.players[identity].getCardNum()))))+str(self.players[identity].getCardNum())+' Cards'
self.players[identity].maxScroll = math.ceil((self.players[identity].getCardNum() / 10)-1)
if self.handPosition > self.players[identity].maxScroll:
self.handPosition -= 1
self.buildHandVisual(identity)
if self.displayEffects and not self.simulation:
print(self.drawScreen())
time.sleep(.1)
self.elements['P{}Turn'.format(self.turn[-1])] = ''
self.players[self.winnerID].addPoints(points)
self.elements['Console'] = '{} Won {} Points! Press Enter'.format(self.players[self.winnerID].getName(),points)
print(self.drawScreen())
self.enterBreak()
gs.clearStaging()
for identity in self.turnList:
self.players[identity].discardHand()
gs.addPlayer(self.players[identity])
return gs
def adjustCardAmount(self, playerID):
keyStringCards = 'P{}Cards'
self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards'
self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1)
if self.handPosition > self.players[playerID].maxScroll:
self.handPosition -= 1
self.buildHandVisual(playerID)
def buildHandString(self, playerID):
playerName = self.players[playerID].getName()
if len(playerName) < 9:
self.handTitles[playerID] = "{}'s Hand\t".format(self.players[playerID].getName())
else:
self.handTitles[playerID] = "{}'s Hand".format(self.players[playerID].getName())
def buildHandVisual(self, playerID):
string = '['
for i in range(self.players[playerID].maxScroll+1):
if i == self.handPosition:
string += '|'
else:
string += '-'
string += ']'
self.elements['HVisual'] = string
def checkInput(self, playerInput):
if playerInput == '':
return {'valid':False,'entry':playerInput}
if playerInput.isnumeric():
if int(playerInput)+(10*self.handPosition) < self.players[self.turn].getCardNum():
return {'valid':True,'entry':str(int(playerInput)+(10*self.handPosition)),'type':'card'}
else:
self.elements['Error'] = '{} is not a card.'.format(playerInput)
return {'valid':False,'entry':playerInput}
else:
playerInput = playerInput.lower()[0]
if playerInput in ['<','>','u','d','p','q','s']:
return {'valid':True,'entry':playerInput}
else:
self.elements['Error'] = '{} is not a valid selection.'.format(playerInput)
return {'valid':False,'entry':playerInput}
def checkColorInput(self, playerInput):
if playerInput == '':
return {'valid':False,'entry':playerInput}
playerInput = str(playerInput).lower()[0]
if playerInput[0] == 'b':
return {'valid':True,'entry':'blue'}
elif playerInput[0] == 'r':
return {'valid':True,'entry':'red'}
elif playerInput[0] == 'g':
return {'valid':True,'entry':'green'}
elif playerInput[0] == 'y':
return {'valid':True,'entry':'yellow'}
return {'valid':False,'entry':playerInput}
def eventDealCards(self):
if self.displayEffects and not self.simulation:
self.elements['Console'] = 'Dealing Cards...'
for i in ('play1','play2','play3','play4'):
if i in self.players:
for _ in range(7):
self.dealCard(i)
if self.displayEffects and not self.simulation:
print(self.drawScreen(True))
time.sleep(.1)
def eventReverse(self):
if self.displayEffects and not self.simulation:
hide = False
if self.players[self.turn].getType() == "Computer":
hide = self.hideComputerHands
self.elements['Console'] = "Reverse Card Played! Reversing Turn Order.".format(self.players[self.turn].getName())
print(self.drawScreen(hide))
time.sleep(1)
for i in range(10):
cardBigNums = self.pile[0].getBigNum(self.reverse,i)
self.elements['oMiddle'] = cardBigNums
print(self.drawScreen(hide))
if self.displayEffects and not self.simulation:
time.sleep(.1)
cardBigNums = self.pile[0].getBigNum(self.reverse,9)
self.elements['oMiddle'] = cardBigNums
self.reverse = not self.reverse
self.event = ''
def eventSkip(self):
if self.displayEffects and not self.simulation:
hide = False
if self.players[self.turn].getType() == "Computer":
hide = self.hideComputerHands
self.elements['Console'] = "Skip Card Placed! Skipping {}'s Turn.".format(self.players[self.turn].getName())
print(self.drawScreen(hide))
time.sleep(1)
for _ in range(2):
self.elements['P{}Turn'.format(self.turn[-1])] = '\033[91m'
print(self.drawScreen(hide))
time.sleep(.3)
self.elements['P{}Turn'.format(self.turn[-1])] = ''
print(self.drawScreen(hide))
time.sleep(.3)
self.turnComplete = True
self.event = ''
def eventWildCard(self):
hide = False
if not self.forcedWild:
if self.players[self.turn].getType() == 'Human':
self.elements['Console'] = 'Wild Card! Specify a Color: (B)lue, (R)ed, (G)reen, (Y)ellow'
self.elements['Error'] = 'Specify A Color'
print(self.drawScreen())
playerInput = str(input("Color Change: "))
checked = self.checkColorInput(playerInput)
while not checked['valid']:
if checked['entry'] == '<':
self.handPosition -= 1
if self.handPosition == -1:
self.handPosition = self.players[self.turn].maxScroll
self.buildHandVisual(self.turn)
elif checked['entry'] == '>':
self.handPosition += 1
if self.handPosition > self.players[self.turn].maxScroll:
self.handPosition = 0
self.buildHandVisual(self.turn)
print(self.drawScreen())
playerInput = str(input("Color Change: "))
checked = self.checkColorInput(playerInput)
else:
hide = self.hideComputerHands
checked = self.checkColorInput(self.players[self.turn].getWildColor())
self.wildColorChange = checked['entry']
else:
self.wildColorChange = self.checkColorInput(random.choice(('r','b','g','y')))['entry']
self.forcedWild = False
self.currentColor = self.wildColorChange
self.elements['Error'] = ""
if self.displayEffects and not self.simulation:
self.elements['Console'] = 'Wild Card! Changing Color.'
seed = 1
for _ in range(10):
if seed > 4:
seed = 1
print(self.drawScreen(hide,wildSeed=seed))
time.sleep(.1)
seed += 1
self.pile[0].changeColor(self.wildColorChange)
self.wildColorChange = ''
cardBigNums = self.pile[0].getBigNum(self.reverse)
self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(self.pile[0].getColorCode())
self.elements['oMiddle'] = cardBigNums
self.event = ''
def eventDraw(self):
self.players[self.turn].addForceDraw(self.drawAmount)
self.drawAmount = 0
self.event = ''
def dealCard(self, playerID):
card = self.deck.draw()
self.players[playerID].addCard(card)
### Adjust Hand Visual ###
self.players[playerID].maxScroll = math.ceil((self.players[playerID].getCardNum() / 10)-1)
self.handPosition = self.players[playerID].maxScroll
self.buildHandVisual(playerID)
### Adjust Player Tile ###
keyStringCards = 'P{}Cards'
self.elements[keyStringCards.format(playerID[-1])] = ' '+(' '*(3-len(str(self.players[playerID].getCardNum()))))+str(self.players[playerID].getCardNum())+' Cards'
### Adjust Deck ###
self.elements['DNum'] = len(self.deck)
if len(str(len(self.deck))) < 2:
self.elements['PostDNum'] = '\t'
j = 8
self.elements['Deck'] = [' ',' ',' ',' ',' ',' ',' ',' ', ' ']
for _ in range(math.ceil(len(self.deck)/12)):
self.elements['Deck'][j] = '='
j -= 1
def placeCard(self, card=None):
if card is None:
### Used At Beginning For First Card ###
card = self.deck.draw()
self.elements['DNum'] = len(self.deck)
cardColor = card.getColorCode()
cardBigNums = card.getBigNum(self.reverse)
self.currentColor = card.getColor()
self.currentValue = card.getValue()
self.pile.insert(card)
self.elements['oHeader'] = '{}\u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t'.format(cardColor)
self.elements['oMiddle'] = cardBigNums
if len(self.pile) > 1:
previousCard = self.pile[1]
previousCardColor = previousCard.getColorCode()
self.elements['uHeader'] = '{} \u2666\u2666\u2666=========\u2666\u2666\u2666\033[0m\t\t'.format(previousCardColor)
self.elements['uMiddle'] = '{}| |\033[0m'.format(previousCardColor)
self.elements['uLower'] = '{}\u2666\u2666\u2666\033[0m'.format(previousCardColor)
if self.currentColor == 'wild':
self.event = 'wild'
if self.currentValue == 'X':
self.event = 'skip'
elif self.currentValue == 'R':
if len(self.players) > 2:
self.event = 'reverse'
else:
self.event = 'skip'
elif self.currentValue == '+4':
self.drawAmount = 4
elif self.currentValue == '+2':
self.drawAmount = 2
self.passes = 0
def extractCard(self, playerID, index):
card = self.players[playerID].removeCard(index)
if self.players[playerID].getCardNum() == 0:
self.matchComplete = True
self.winnerID = self.turn
self.adjustCardAmount(playerID)
return card
def enterBreak(self):
if not self.simulation:
str(input())
return
def nextTurn(self):
self.turnComplete = False
self.handPosition = 0
turnType = self.players[self.turn].getType()
self.players[self.turn].beginTurn()
### Prepare Hand Visuals ###
self.elements['HName'] = self.handTitles[self.turn]
self.buildHandVisual(self.turn)
if self.event == 'skip':
self.eventSkip()
elif self.drawAmount > 0:
self.eventDraw()
while not self.turnComplete:
if turnType == 'Human':
self.players[self.turn].getLegalCards(self.currentColor, self.currentValue, self.zeroChange)
if len(self.deck) > 0:
self.elements['Console'] = 'Select a card, (D)raw, or (P)ause.'
else:
self.players[self.turn].removeForceDraw()
self.elements['Console'] = 'Select a card, (D)raw, (P)ause, or Pas(s).'
if self.players[self.turn].getForceDraws() > 0:
self.elements['Error'] = 'Draw Card Played! Draw {} cards.'.format(self.players[self.turn].getForceDraws())
print(self.drawScreen())
playerInput = str(input("\033[97mSelection: \033[92m"))
checked = self.checkInput(playerInput)
while not checked['valid']:
print(self.drawScreen())
playerInput = str(input("\033[97mSelection: \033[92m"))
checked = self.checkInput(playerInput)
playerInput = checked['entry']
if playerInput == '<':
self.handPosition -= 1
if self.handPosition == -1:
self.handPosition = self.players[self.turn].maxScroll
self.buildHandVisual(self.turn)
elif playerInput == '>':
self.handPosition += 1
if self.handPosition > self.players[self.turn].maxScroll:
self.handPosition = 0
self.buildHandVisual(self.turn)
elif playerInput == 'd':
if len(self.deck) > 0:
self.elements['Error'] = ''
self.dealCard(self.turn)
else:
self.elements['Error'] = "Cannot Draw. Deck is Empty"
elif playerInput == 'p':
pauseOutput = self.pauseScreen()
if pauseOutput == 'quit':
self.matchComplete = True
self.turnComplete = True
self.winnerID = 'play1'
self.matchAbort = True
elif playerInput == 's':
if len(self.deck) > 0:
self.elements['Error'] = "Cannot pass until Deck is empty."
elif len(self.players[self.turn].getAllValidCards()) > 0:
self.elements['Error'] = "Cannot pass while having playable cards."
else:
self.turnComplete = True
self.passes += 1
if self.passes == self.passMax:
self.forcedWild = True
self.event = 'wild'
self.passes = 0
elif playerInput.isnumeric():
if self.players[self.turn].getForceDraws() == 0:
cardCheck = self.players[self.turn].checkCard(playerInput)
if cardCheck in self.players[self.turn].getAllValidCards():
card = self.extractCard(self.turn, playerInput)
self.placeCard(card)
self.elements['Error'] = ""
self.turnComplete = True
else:
self.elements['Error'] = "Card Doesn't Match The Color {} or Value {}!".format(self.currentColor, self.currentValue)
else:
pass
elif turnType == 'Computer':
self.elements['Console'] = '{}\'s Turn'.format(self.players[self.turn].getName())
print(self.drawScreen(self.hideComputerHands))
if not self.simulation:
time.sleep(self.computerSpeed)
#str(input())
while (True):
if self.displayEffects and not self.simulation:
time.sleep(.2)
if self.players[self.turn].getForceDraws() > 0 and len(self.deck) > 0:
cardIndex = 'd'
else:
cardIndex = self.players[self.turn].think(self)
if cardIndex.isnumeric():
card = self.extractCard(self.turn, int(cardIndex))
if card.getColor() != self.currentColor:
self.resetDrawBool()
self.placeCard(card)
self.turnComplete = True
break
else:
if cardIndex == 'd':
if len(self.deck) > 0:
self.dealCard(self.turn)
print(self.drawScreen(self.hideComputerHands))
else:
self.turnComplete = True
self.players[self.turn].removeForceDraw()
self.passes += 1
if self.passes == self.passMax:
self.forcedWild = True
self.event = 'wild'
self.passes = 0
break
### DECODE INPUT ###
if self.event == 'reverse':
self.eventReverse()
elif self.event == 'wild':
self.eventWildCard()
# Clear Current Turn
self.elements['P{}Turn'.format(self.turn[-1])] = ''
# Prepare Next Turn
self.turn = self.getNextTurn()
self.elements['P{}Turn'.format(self.turn[-1])] = '\033[93m'
def drawScreen(self, hide=False, wildSeed=0):
if self.simulation:
return ''
colorCombos = {
1 : ['\033[91m','\033[93m','\033[92m','\033[94m'],
2 : ['\033[94m','\033[91m','\033[93m','\033[92m'],
3 : ['\033[92m','\033[94m','\033[91m','\033[93m'],
4 : ['\033[93m','\033[92m','\033[94m','\033[91m'] }
currentTurn = self.turn
if currentTurn == '':
currentTurn = self.turnList[-1]
hide = True
if wildSeed != 0:
colorMod = colorCombos[wildSeed]
else:
colorMod = ['','','','']
self.clearShell()
screenout = ''
screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m'
screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m'
screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m'
screenout += '\033[97m===============================================================\n'
screenout += '\033[93m{}\033[0m\n'.format(self.elements['Console'])
screenout += '\033[97m===============================================================\n'
screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn'])
screenout += '\033[97mDeck:\t\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Name'])
screenout += '\033[97m{} Cards'.format(self.elements['DNum']) + '{}'.format(self.elements['PostDNum'])+'\t' + '{}'.format(self.elements['uHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P1Turn'],self.elements['P1Cards'])
screenout += '\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P1Turn'])
screenout += '\033[97m _+_ \t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oHeader']) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn'])
screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][0]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][0]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Name'])
screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][1]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][1]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P2Turn'],self.elements['P2Cards'])
screenout += '\033[97m | ' + '\033[92m{}\033[0m'.format(self.elements['Deck'][2]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][2]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P2Turn'])
screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][3]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][3]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn'])
screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][4]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[2],self.elements['oMiddle'][4]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Name'])
screenout += '\033[97m | ' + '\033[93m{}\033[0m'.format(self.elements['Deck'][5]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uMiddle']) + '\033[97m{}{}'.format(colorMod[3],self.elements['oMiddle'][5]) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P3Turn'],self.elements['P3Cards'])
screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][6]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[0],self.elements['oMiddle'][6]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P3Turn'])
screenout += '\033[97m | ' + '\033[91m{}\033[0m'.format(self.elements['Deck'][7]) + '\033[97m |\t\t ' + '{}'.format(self.elements['uLower']) + '\033[97m{}{}'.format(colorMod[1],self.elements['oMiddle'][7]) + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn'])
screenout += '\033[97m |_' + '\033[91m{}\033[0m'.format(self.elements['Deck'][8]) + '\033[97m_|\t\t ' + '\033[97m{}{}'.format(colorMod[2],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Name'])
screenout += '\033[97m\t\t ' + '\033[97m{}{}'.format(colorMod[3],self.elements['oHeader']) + ' \033[97m{}|{}|\033[0m\n'.format(self.elements['P4Turn'],self.elements['P4Cards'])
screenout += '\t\t\t\t\t\t' + ' \033[97m{}\u2666-----------\u2666\033[0m\n'.format(self.elements['P4Turn'])
screenout += "\033[97m{}".format(self.elements['HName']) + "\t\t\t\t {}\n".format(self.elements['HVisual'])
screenout += '\033[97m===============================================================\n'
screenout += self.players[currentTurn].getHand(self.handPosition,hide)
screenout += '\033[91m{}\033[0m'.format(self.elements['Error'])
return screenout
def pauseScreen(self):
while True:
self.clearShell()
print('\n\t\t\tPause')
print('\n\t\t1. Resume')
print('\t\t2. Quit')
selection = str(input('\nSelection: ')).upper()
while selection not in ['1', '2']:
print('\nSelection Invalid')
selection = str(input('\nSelection: ')).upper()
if selection == '1':
return ""
elif selection == '2':
return "quit"
def isComplete(self):
return self.matchComplete
def next(self):
self.turn = self.getNextTurn()
def getNextTurn(self, forceReverse=False):
if forceReverse:
reverse = not self.reverse
else:
reverse = self.reverse
currentIndex = self.turnList.index(self.turn)
if not reverse:
if (currentIndex + 1) == len(self.turnList):
return self.turnList[0]
else:
return self.turnList[currentIndex+1]
else:
if currentIndex == 0:
return self.turnList[len(self.turnList) - 1]
else:
return self.turnList[currentIndex-1]
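# Note: getNextTurn(True) flips the direction for a single lookup without
# touching self.reverse; ComputerPlayer.think() relies on this to find the
# *previous* player (e.g. to check whether they just drew a card).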
def getPlayer(self, playerID):
return self.players[playerID]
def resetDrawBool(self):
for identity in self.players:
self.players[identity].drew = False
def Uno(debugging=False):
###MENUS###
def clearShell():
os.system('cls' if os.name == 'nt' else 'clear')
def mainMenu():
sys.stdout.write("\x1b[8;32;63t")
sys.stdout.flush()
gs = GameSettings()
while True:
print(drawMainMenu(gs))
selection = str(input('\033[97mSelection: \033[92m'))
while selection not in ['1', '2', '3', '4', '5']:
gs.mainMenuError = "Invalid Selection"
print(drawMainMenu(gs))
selection = str(input('\033[97mSelection: \033[92m'))
if selection == '1':
if gs.canBegin():
gs.mainMenuError = ""
gs.finalizePlayers()
gs = playMatch(gs)
else:
gs.mainMenuError = "Two Players Required to Begin"
elif selection == '2':
if gs.canAddPlayer():
gs.mainMenuError = ""
gs = addPlayer(gs)
else:
gs.mainMenuError = "Max Number of Players Reached"
elif selection == '3':
if gs.canAddPlayer():
gs.mainMenuError = ""
gs = addComputer(gs)
else:
gs.mainMenuError = "Max Number of Players Reached"
elif selection == '4':
if gs.canRemovePlayer():
gs.mainMenuError = ""
gs = removePlayer(gs)
else:
gs.mainMenuError = "No Players to Remove"
elif selection == '5':
gs.mainMenuError = ""
gs = settingsMenu(gs)
else:
raise BadInputError('Data Provided Has No Function')
def playMatch(gs):
for _ in range(1):
m = Match(gs)
m.begin()
while (not m.isComplete()):
m.nextTurn()
gs = m.end(gs)
return gs
def addPlayer(gs):
colors = ['\033[91m','\033[94m', '\033[92m', '\033[93m']
nameOkay = False
playerNum = gs.getPlayerNum() + 1
colorIndex = playerNum - 1
message = "\033[97mPlease Enter Player {}'s Name: {}".format(playerNum, colors[colorIndex])
while not nameOkay:
print(drawMainMenu(gs))
name = str(input(message)).title()
if len(name) > 11:
gs.mainMenuError = "Name Must Be 11 Characters or Less!"
elif len(name) == 0:
gs.mainMenuError = ""
return gs
else:
nameOkay = True
for player in gs.playerStaging:
if player.getName() == name:
nameOkay = False
if not nameOkay or name in GameSettings.computerNames:
gs.mainMenuError = "Name Cannot Match Another Player's Name!"
p = Player(name)
gs.addPlayer(p)
gs.mainMenuError = ""
return gs
def addComputer(gs):
name = gs.getComputerName()
c = ComputerPlayer(name)
gs.addPlayer(c)
return gs
def removePlayer(gs):
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=32, cols=63))
sys.stdout.flush()
clearShell()
complete = False
playerNum = gs.getPlayerNum()
message = "\033[97mPlease Enter Player Number to Remove: \033[91m"
while (not complete):
print(drawMainMenu(gs))
number = str(input(message))
if len(number) == 0:
gs.mainMenuError = ""
return gs
try:
number = int(number)
if 0 < number <= playerNum:
complete = True
else:
gs.mainMenuError = "Invalid Player Number!"
except ValueError:
gs.mainMenuError = "Please Enter the Player Number, not Name!"
gs.mainMenuError = ""
gs.removePlayer(number)
return gs
def settingsMenu(gs):
while True:
sys.stdout.write("\x1b[8;32;63t")
sys.stdout.flush()
clearShell()
print('\n\t\tSettings')
print('\n\t1. Draw Effects\t\t\t{}'.format(gs.displayEffects))
print('\t2. Hide Computer Hands\t\t{}'.format(gs.hideComputerHands))
print('\t3. Computer Speed\t\t{}'.format(gs.computerSpeed.title()))
#print('\t4. Zero Card Changes Color\t{}'.format(gs.zeroChange))
#print('\t5. Run Simulations\t\t{}'.format(gs.computerSimulation))
print('\n\tA. Exit')
selection = str(input('\nSelection: ')).upper()
while selection not in ('1', '2', '3', '4', '5', 'A', ''):
print('\nSelection Invalid')
selection = str(input('\nSelection: ')).upper()
if selection == '1':
gs.displayEffects = not gs.displayEffects
elif selection == '2':
gs.hideComputerHands = not gs.hideComputerHands
elif selection == '3':
gs.changeComputerSpeed()
'''
elif selection == '4':
gs.zeroChange = not gs.zeroChange
elif selection == '5':
gs.computerSimulation = not gs.computerSimulation
'''
elif selection == 'A' or selection == '' or selection in ('4','5'):
return gs
def drawMainMenu(gs):
clearShell()
gs.compileMainMenuElements()
menuElements = gs.getMainMenuElements()
screenout = ''
screenout += '\t\t\033[94m || ||\033[92m ||\ || \033[91m// \\\\\n\033[0m'
screenout += '\t\t\033[94m || ||\033[92m ||\\\|| \033[91m(( ))\n\033[0m'
screenout += '\t\t\033[94m \\\ //\033[92m || \|| \033[91m \\\ //\n\033[0m'
screenout += '\033[97m===============================================================\033[0m\n'
screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box'])
screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row1'],menuElements['play2box'],menuElements['play2row1'])
screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play1box'],menuElements['play1row2'],menuElements['play2box'],menuElements['play2row2'])
screenout += "{}1-----------------------------1\033[0m {}2-----------------------------2\033[0m\n".format(menuElements['play1box'],menuElements['play2box'])
screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box'])
screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row1'],menuElements['play4box'],menuElements['play4row1'])
screenout += "{}|{}|\033[0m {}|{}|\033[0m\n".format(menuElements['play3box'],menuElements['play3row2'],menuElements['play4box'],menuElements['play4row2'])
screenout += "{}3-----------------------------3\033[0m {}4-----------------------------4\033[0m\n".format(menuElements['play3box'],menuElements['play4box'])
screenout += "\033[97m===============================================================\033[0m\n"
screenout += " {}\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n".format(menuElements['beginBox'])
screenout += " {}|1. Begin Match |\033[0m | High Scores |\n".format(menuElements['beginBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m \u2666---------------------------\u2666\n".format(menuElements['beginBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}|2. Add Player |\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}|3. Add Computer |\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['addBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox'])
screenout += " {}|4. Remove Player |\033[0m | |\n".format(menuElements['removeBox'])
screenout += " {}\u2666---------------------------\u2666\033[0m | |\n".format(menuElements['removeBox'])
screenout += " \033[97m\u2666---------------------------\u2666\033[0m | |\n"
screenout += " \033[97m|5. Settings |\033[0m | |\n"
screenout += " \033[97m\u2666---------------------------\u2666\033[0m \u2666===========================\u2666\n"
screenout += "\033[97m===============================================================\033[0m\n"
screenout += '\033[91m{}\033[0m'.format(gs.mainMenuError)
return screenout
mainMenu()
if __name__ == "__main__":
Uno()
|
the-stack_106_19540
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import tempfile
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
ZCASHD_BINARY = os.path.join('src', 'zcashd')
DEFAULT_FEE = Decimal('0.00001')
DEFAULT_FEE_ZATS = 1000
COVERAGE_DIR = None
PRE_BLOSSOM_BLOCK_TARGET_SPACING = 150
POST_BLOSSOM_BLOCK_TARGET_SPACING = 75
SPROUT_BRANCH_ID = 0x00000000
OVERWINTER_BRANCH_ID = 0x5BA81B19
SAPLING_BRANCH_ID = 0x76B809BB
BLOSSOM_BRANCH_ID = 0x2BB40E60
HEARTWOOD_BRANCH_ID = 0xF5B9230B
CANOPY_BRANCH_ID = 0xE9FF75A6
NU5_BRANCH_ID = 0xC2D6D0B4
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy: a convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
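# Worked example of the port layout (illustrative, assuming PortSeed.n == 1):
#   p2p_port(0) == 11000 + 0 + (8 * 1) % 4991 == 11008
#   rpc_port(0) == 11000 + 5000 + 0 + (8 * 1) % 4991 == 16008
# so every test process gets its own non-overlapping block of p2p/rpc ports.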
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
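# Quick reference for the helpers above (values follow from the definitions):
#   bytes_to_hex_str(b'\xde\xad') == 'dead'
#   hex_str_to_bytes('dead')      == b'\xde\xad'
#   str_to_b64str('user:pass')    == 'dXNlcjpwYXNz'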
def sync_blocks(rpc_connections, wait=0.125, timeout=60, allow_different_tips=False):
"""
Wait until everybody has the same tip, and has notified
all internal listeners of them.
If allow_different_tips is True, waits until everyone has
the same block count.
"""
while timeout > 0:
if allow_different_tips:
tips = [ x.getblockcount() for x in rpc_connections ]
else:
tips = [ x.getbestblockhash() for x in rpc_connections ]
if tips == [ tips[0] ]*len(tips):
break
time.sleep(wait)
timeout -= wait
# Now that the block counts are in sync, wait for the internal
# notifications to finish
while timeout > 0:
notified = [ x.getblockchaininfo()['fullyNotified'] for x in rpc_connections ]
if notified == [ True ] * len(notified):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=0.5, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools, and has notified all internal listeners of them
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
timeout -= wait
# Now that the mempools are in sync, wait for the internal
# notifications to finish
while timeout > 0:
notified = [ x.getmempoolinfo()['fullyNotified'] for x in rpc_connections ]
if notified == [ True ] * len(notified):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "zcash.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("showmetrics=0\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes, cachedir):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
# Due to the consensus change fix for the timejacking attack, we need to
# ensure that the cache is pretty fresh. Specifically, we need the median
# time past of the chain tip of the cache to be no more than 90 minutes
# behind the current local time, or else mined blocks will be rejected by
# all nodes, halting the test. With Sapling active by default, this requires
# the chain tip itself to be no more than 75 minutes behind the current
# local time.
#
# We address this here, by regenerating the cache if it is more than 60
# minutes old. This gives 15 minutes of slack initially that an RPC test has
# to complete in, if it is started right at the oldest cache time. Within an
# individual test, the first five calls to `generate` will each advance the
# median time past of the chain tip by 2.5 minutes (with Sapling active by
# default). Therefore, if the logic between the completion of any two
# adjacent calls to `generate` within a test takes longer than 2.5 minutes,
# the excess will subtract from the slack.
if os.path.isdir(os.path.join(cachedir, "node0")):
if os.stat(cachedir).st_mtime + (60 * 60) < time.time():
print("initialize_chain(): Removing stale cache")
shutil.rmtree(cachedir)
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(cachedir, 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir(cachedir, i)
args = [ os.getenv("ZCASHD", ZCASHD_BINARY), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
args.extend([
'-nuparams=5ba81b19:1', # Overwinter
'-nuparams=76b809bb:1', # Sapling
])
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: bitcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC successfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+rpc_url(i)+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# Blocks are created with timestamps 2.5 minutes apart (matching the
# chain defaulting above to Sapling active), starting 200 * 2.5 minutes
# before the current time.
block_time = int(time.time()) - (200 * PRE_BLOSSOM_BLOCK_TARGET_SPACING)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += PRE_BLOSSOM_BLOCK_TARGET_SPACING
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Check that local time isn't going backwards
assert_greater_than(time.time() + 1, block_time)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(MAX_NODES):
os.remove(log_filename(cachedir, i, "debug.log"))
os.remove(log_filename(cachedir, i, "db.log"))
os.remove(log_filename(cachedir, i, "peers.dat"))
os.remove(log_filename(cachedir, i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join(cachedir, "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in zcash.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("ZCASHD", ZCASHD_BINARY)
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
args.extend([
'-nuparams=5ba81b19:1', # Overwinter
'-nuparams=76b809bb:1', # Sapling
])
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: bitcoind started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC successfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
node = start_node(i, dirname, extra_args, stderr=log_stderr)
stop_node(node, i)
except Exception as e:
assert 'bitcoind exited' in str(e) #node must have shutdown
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoind should have exited with an error"
else:
assert_msg = "bitcoind should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def check_node(i):
bitcoind_processes[i].poll()
return bitcoind_processes[i].returncode
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in list(bitcoind_processes.values()):
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(expected, actual, message=""):
if expected != actual:
if message:
message = "; %s" % message
raise AssertionError("(left == right)%s\n left: <%s>\n right: <%s>" % (message, str(expected), str(actual)))
def assert_true(condition, message = ""):
if not condition:
raise AssertionError(message)
def assert_false(condition, message = ""):
assert_true(not condition, message)
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(ExceptionType, errstr, func, *args, **kwargs):
"""
Asserts that func throws and that the exception contains 'errstr'
in its message.
"""
try:
func(*args, **kwargs)
except ExceptionType as e:
if errstr is not None and errstr not in str(e):
raise AssertionError("Invalid exception string: Couldn't find %r in %r" % (
errstr, str(e)))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def fail(message=""):
raise AssertionError(message)
# Returns an async operation result
def wait_and_assert_operationid_status_result(node, myopid, in_status='success', in_errormsg=None, timeout=300):
print('waiting for async operation {}'.format(myopid))
result = None
for _ in range(1, timeout):
results = node.z_getoperationresult([myopid])
if len(results) > 0:
result = results[0]
break
time.sleep(1)
assert_true(result is not None, "timeout occurred")
status = result['status']
debug = os.getenv("PYTHON_DEBUG", "")
if debug:
print('...returned status: {}'.format(status))
errormsg = None
if status == "failed":
errormsg = result['error']['message']
if debug:
print('...returned error: {}'.format(errormsg))
assert_equal(in_errormsg, errormsg)
assert_equal(in_status, status, "Operation returned mismatched status. Error Message: {}".format(errormsg))
return result
# Returns txid if operation was a success or None
def wait_and_assert_operationid_status(node, myopid, in_status='success', in_errormsg=None, timeout=300):
result = wait_and_assert_operationid_status_result(node, myopid, in_status, in_errormsg, timeout)
if result['status'] == "success":
return result['result']['txid']
else:
return None
# Find a coinbase address on the node, filtering by the number of UTXOs it has.
# If no filter is provided, returns the coinbase address on the node containing
# the greatest number of spendable UTXOs.
# The default cached chain has one address per coinbase output.
def get_coinbase_address(node, expected_utxos=None):
addrs = [utxo['address'] for utxo in node.listunspent() if utxo['generated']]
assert(len(set(addrs)) > 0)
if expected_utxos is None:
addrs = [(addrs.count(a), a) for a in set(addrs)]
return sorted(addrs, reverse=True)[0][1]
addrs = [a for a in set(addrs) if addrs.count(a) == expected_utxos]
assert(len(addrs) > 0)
return addrs[0]
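# Illustrative call: with the default cached chain (one address per coinbase
# output, per the note above), get_coinbase_address(node, expected_utxos=1)
# returns an address holding exactly one spendable coinbase UTXO.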
def check_node_log(self, node_number, line_to_check, stop_node = True):
print("Checking node " + str(node_number) + " logs")
if stop_node:
self.nodes[node_number].stop()
bitcoind_processes[node_number].wait()
logpath = self.options.tmpdir + "/node" + str(node_number) + "/regtest/debug.log"
with open(logpath, "r", encoding="utf8") as myfile:
logdata = myfile.readlines()
for (n, logline) in enumerate(logdata):
if line_to_check in logline:
return n
raise AssertionError(repr(line_to_check) + " not found")
def nustr(branch_id):
return '%08x' % branch_id
def nuparams(branch_id, height):
return '-nuparams=%s:%d' % (nustr(branch_id), height)
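# Example (matches the hard-coded activation flags used in initialize_chain and
# start_node above):
#   nuparams(OVERWINTER_BRANCH_ID, 1) == '-nuparams=5ba81b19:1'
#   nuparams(SAPLING_BRANCH_ID, 1)    == '-nuparams=76b809bb:1'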
|
the-stack_106_19541
|
from injector import inject
from domain.connection.DeleteConnection.DeleteConnectionCommand import DeleteConnectionCommand
from domain.connection.services.ConnectionService import ConnectionService
from domain.notification.SendNotification.SendNotificationCommand import SendNotificationCommand
from domain.notification.SendNotification.SendNotificationRequest import SendNotificationRequest, \
NotificationAdditionalData
from infrastructure.cqrs.Dispatcher import Dispatcher
from infrastructure.cqrs.ICommandHandler import ICommandHandler
from infrastructure.data.RepositoryProvider import RepositoryProvider
class DeleteConnectionHandler(ICommandHandler[DeleteConnectionCommand]):
@inject
def __init__(self,
dispatcher: Dispatcher,
connection_service: ConnectionService,
repository_provider: RepositoryProvider,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.repository_provider = repository_provider
self.dispatcher = dispatcher
self.connection_service = connection_service
def handle(self, command: DeleteConnectionCommand):
result = self.connection_service.delete_connection(id=command.request.Id)
self.repository_provider.commit()
self.notify(message=result, id=command.request.Id)
def notify(self, message: str, id: int):
data_list = []
data = NotificationAdditionalData(Key="Type", Value="Connection")
data_list.append(data)
data = NotificationAdditionalData(Key="Id", Value=id)
data_list.append(data)
send_notification_request = SendNotificationRequest(Message=message, Type=1, Action=3, AdditionalData=data_list)
self.dispatcher.dispatch(SendNotificationCommand(request=send_notification_request))
|
the-stack_106_19543
|
import pybullet as p
from time import sleep
import pybullet_data
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -10)
planeId = p.loadURDF("plane.urdf", [0,0,-2])
boxId = p.loadURDF("cube.urdf", [0,3,2],useMaximalCoordinates = True)
bunnyId = p.loadSoftBody("bunny.obj")  # alternative soft-body meshes (.obj or .vtk) can be loaded here
#meshData = p.getMeshData(bunnyId)
#print("meshData=",meshData)
#p.loadURDF("cube_small.urdf", [1, 0, 1])
useRealTimeSimulation = 1
if (useRealTimeSimulation):
p.setRealTimeSimulation(1)
print(p.getDynamicsInfo(planeId, -1))
#print(p.getDynamicsInfo(bunnyId, 0))
print(p.getDynamicsInfo(boxId, -1))
p.changeDynamics(boxId,-1,mass=10)
while p.isConnected():
p.setGravity(0, 0, -10)
if (useRealTimeSimulation):
sleep(0.01) # Time in seconds.
#p.getCameraImage(320,200,renderer=p.ER_BULLET_HARDWARE_OPENGL )
else:
p.stepSimulation()
|
the-stack_106_19545
|
default_client_config = {
'exception_on_negative_response' : True,
'exception_on_invalid_response' : True,
'exception_on_unexpected_response' : True,
'security_algo' : None,
'security_algo_params' : None,
'tolerate_zero_padding' : True,
'ignore_all_zero_dtc' : True,
'dtc_snapshot_did_size' : 2, # Not specified in standard. 2 bytes matches other services format.
'server_address_format' : None, # 8,16,24,32,40
'server_memorysize_format' : None, # 8,16,24,32,40
'data_identifiers' : {},
'input_output' : {},
'request_timeout' : 5,
'p2_timeout' : 1,
'p2_star_timeout' : 5,
}
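# Minimal usage sketch (assumption: a UDS-style client accepts such a dict): derive a
# per-connection config by copying the defaults and overriding individual keys, e.g.
# my_config = dict(default_client_config, p2_timeout=2, request_timeout=10)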
|
the-stack_106_19546
|
"""Several helper functions to convert between data objects and JSON."""
import json
from tinycards.model import Card, Concept, Deck, Fact, Favorite
from tinycards.model import SearchableData, Side, Trendable, TrendableData
from tinycards.model import User
# --- User conversion
def json_to_user(json_data):
"""Convert a JSON dict into a User object."""
user_obj = User(
creation_date=json_data['creationDate'],
email=json_data['email'],
fullname=json_data['fullname'],
user_id=json_data['id'],
learning_language=json_data['learningLanguage'],
picture_url=json_data['picture'],
subscribed=json_data['subscribed'],
subscriber_count=json_data['subscriberCount'],
subscription_count=json_data['subscriptionCount'],
ui_language=json_data['uiLanguage'],
username=json_data['username']
)
return user_obj
# --- Fact conversion
def json_to_fact(json_data):
"""Convert a JSON dict into a Fact object."""
fact_obj = Fact(
fact_id=json_data['id'],
fact_type=json_data['type'],
text=json_data.get('text'),
image_url=json_data.get('imageUrl'),
tts_url=json_data.get('ttsUrl')
)
return fact_obj
def fact_to_json(fact_obj):
"""Convert a Fact object into a JSON dict."""
json_data = {
# 'id': fact_obj.id,
'text': fact_obj.text,
'type': fact_obj.type
}
return json_data
# --- Concept conversion
def json_to_concept(json_data):
"""Convert a JSON dict into a Concept object."""
concept_obj = Concept(
fact=json_to_fact(json_data['fact']),
concept_id=json_data['id'],
creation_timestamp=json_data['createdAt'],
update_timestamp=json_data['updatedAt']
)
return concept_obj
def concept_to_json(concept_obj):
"""Convert a Concept object into a JSON dict."""
json_data = {
# 'createdAt': concept_obj.creation_timestamp,
'fact': fact_to_json(concept_obj.fact),
# 'id': concept_obj.id,
# 'noteFacts': [],
# 'updatedAt': concept_obj.update_timestamp,
}
return json_data
# --- Side conversion
def json_to_side(json_data):
"""Convert a JSON dict into a Side object."""
side_obj = Side(
side_id=json_data['id'],
concepts=[json_to_concept(c) for c in json_data['concepts']]
)
return side_obj
def side_to_json(side_obj):
"""Convert a Side object into a JSON dict."""
json_data = {
'concepts': [concept_to_json(c) for c in side_obj.concepts],
# 'id': side_obj.side_id,
}
return json_data
# --- Card conversion
def json_to_card(json_data):
"""Convert a JSON dict into a Card object."""
card_obj = Card(
front=json_to_side(json_data['sides'][0]),
back=json_to_side(json_data['sides'][1]),
card_id=json_data['id']
)
return card_obj
def card_to_json(card_obj):
"""Convert a Card object into a JSON dict."""
json_data = {
# 'id': card_obj.id,
'creationTimestamp': card_obj.creation_timestamp,
'sides': [
side_to_json(card_obj.front),
side_to_json(card_obj.back)
],
}
# Add additional fields if not None.
# if card_obj.creation_timestamp:
# json_data['creationTimestamp'] = card_obj.creation_timestamp
return json_data
# --- Deck conversion
def json_to_deck(json_data):
"""Convert a JSON dict into a Deck object."""
deck = Deck(
title=json_data['name'],
description=json_data['description'],
deck_id=json_data['id'],
compact_id=json_data['compactId'],
slug=json_data['slug'],
cards=([json_to_card(c) for c in json_data['cards']]
if 'cards' in json_data else []),
private=bool(json_data['private']),
shareable=bool(json_data['shareable']),
blacklisted_side_indices=json_data['blacklistedSideIndices'],
blacklisted_question_types=json_data['blacklistedQuestionTypes'],
grading_modes=json_data['gradingModes'],
tts_languages=json_data['ttsLanguages'],
)
deck.image_url = json_data['imageUrl']
deck.cover_image_url = json_data['coverImageUrl']
return deck
def deck_to_json(deck_obj, as_json_str=False):
"""Convert a Deck object into a JSON dict.
Contains a lot of placeholder values at the moment.
Args:
as_json_str (bool): Convert lists into a single JSON string (required
for PATCH with content-type: application/json).
"""
cards = [card_to_json(c) for c in deck_obj.cards]
json_data = {
'name': deck_obj.title,
'description': deck_obj.description,
'private': deck_obj.private,
'shareable': deck_obj.shareable,
'cards': as_obj_or_json_str(cards, as_json_str),
'ttsLanguages': as_obj_or_json_str(
deck_obj.tts_languages, as_json_str
),
'blacklistedSideIndices': as_obj_or_json_str(
deck_obj.blacklisted_side_indices, as_json_str
),
'blacklistedQuestionTypes': as_obj_or_json_str(
deck_obj.blacklisted_question_types, as_json_str
),
'gradingModes': as_obj_or_json_str(
deck_obj.grading_modes, as_json_str
),
'fromLanguage': 'en',
'imageFile': deck_obj.cover,
'coverImageUrl': deck_obj.cover_image_url,
}
return json_data
def as_obj_or_json_str(obj, as_json_str):
return json.dumps(obj) if as_json_str else obj
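# For example, as_obj_or_json_str([1, 2], True) returns the string '[1, 2]',
# while as_obj_or_json_str([1, 2], False) returns the list unchanged.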
# --- Trendable conversion
def json_to_trendable(json_data):
"""Convert a JSON dict into a Trendable object."""
json_trendable_data = json_data.get('data')
if not json_trendable_data:
raise ValueError("JSON object contains no 'data' field")
try:
trendable_data = TrendableData(
json_trendable_data['blacklistedQuestionTypes'],
json_trendable_data['blacklistedSideIndices'],
json_trendable_data['cardCount'],
json_trendable_data['compactId'],
json_trendable_data['coverImageUrl'],
json_trendable_data['createdAt'],
json_trendable_data['deckGroups'],
json_trendable_data['description'],
json_trendable_data['enabled'],
json_trendable_data['favoriteCount'],
json_trendable_data['fromLanguage'],
json_trendable_data.get('fullname'),
json_trendable_data['gradingModes'],
json_trendable_data['hashes'],
json_trendable_data['id'],
json_trendable_data['imageUrl'],
json_trendable_data['name'],
json_trendable_data['picture'],
json_trendable_data['private'],
json_trendable_data['shareable'],
json_trendable_data['slug'],
json_trendable_data['tagIds'],
json_trendable_data['ttsLanguages'],
json_trendable_data['uiLanguage'],
json_trendable_data['updatedAt'],
json_trendable_data['userId'],
json_trendable_data['username']
)
except KeyError as ke:
raise ke
trendable_obj = Trendable(id_=json_data['id'],
type_=json_data['type'],
data=trendable_data)
return trendable_obj
def trendable_to_json(trendable_obj: Trendable):
"""Convert a Trendable object into a JSON dict."""
trendable_data = trendable_obj.data
json_trendable_data = {
'blacklistedQuestionTypes': trendable_data.blacklisted_question_types,
'blacklistedSideIndices': trendable_data.blacklisted_side_indices,
'cardCount': trendable_data.card_count,
'compactId': trendable_data.compact_id,
'coverImageUrl': trendable_data.cover_image_url,
'createdAt': trendable_data.created_at,
'deckGroups': trendable_data.deck_groups,
'description': trendable_data.description,
'enabled': trendable_data.enabled,
'favoriteCount': trendable_data.favorite_count,
'fromLanguage': trendable_data.from_language,
'fullname': trendable_data.fullname,
'gradingModes': trendable_data.grading_modes,
'hashes': trendable_data.hashes,
'id': trendable_data.id,
'imageUrl': trendable_data.image_url,
'name': trendable_data.name,
'picture': trendable_data.picture,
'private': trendable_data.private,
'shareable': trendable_data.shareable,
'slug': trendable_data.slug,
'tagIds': trendable_data.tagIds,
'ttsLanguages': trendable_data.tts_languages,
'uiLanguage': trendable_data.ui_language,
'updatedAt': trendable_data.updated_at,
'username': trendable_data.username
}
json_data = {
'id': trendable_obj.id,
'type': trendable_obj.type,
'data': json_trendable_data
}
return json_data
# --- Searchable conversion
def json_to_searchable(json_data):
"""Convert a JSON dict into a Searchable object."""
json_searchable_data = json_data.get('data')
if not json_searchable_data:
raise ValueError("JSON object contains no 'data' field")
try:
searchable_data = SearchableData(
json_searchable_data['id'],
json_searchable_data['name'],
json_searchable_data['description'],
json_searchable_data.get('averageFreshness')
)
except KeyError as ke:
raise ke
searchable_obj = Trendable(id_=json_data['id'],
type_=json_data['type'],
data=searchable_data)
return searchable_obj
# --- Favorite conversion
def json_to_favorite(json_data):
"""Convert a JSON dict into a Favorite object."""
favorite_obj = Favorite(id_=json_data['id'],
deck=json_to_deck(json_data['deck']))
return favorite_obj
def favorite_to_json(favorite_obj: Favorite):
"""Convert a Favorite object into a JSON dict."""
json_data = {
'id': favorite_obj.id,
'deck': deck_to_json(favorite_obj.deck)
}
return json_data
|
the-stack_106_19547
|
n = int(input("Enter the desired term of the Fibonacci sequence: "))
a = 1
b = 1
k = 1
while k <= n - 2:
tmp = a
a = b
b = tmp + b
k = k + 1
print("Term {} of the Fibonacci sequence is {}.".format(n, b))
#https://pt.stackoverflow.com/q/358586/101
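# For example, n = 5 runs the loop three times ((a, b): (1, 2) -> (2, 3) -> (3, 5)),
# so the program reports that the 5th term is 5.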
|
the-stack_106_19548
|
#!/usr/bin/env python3
from time import sleep
import os
import RPi.GPIO as GPIO
pin = 14 # GPIO pin
maxTMP = 55 # The temperature in Celsius after which we trigger the fan
minTMP = 40 # The temperature in Celsius after which we stop the fan
sleepTime = 5
debug = False
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
def getCPUtemperature():
res = os.popen("vcgencmd measure_temp").readline()
temp =(res.replace("temp=","").replace("'C\n",""))
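    # vcgencmd typically returns a line like "temp=48.9'C\n"; the replacements
    # above strip it down to the numeric part, e.g. "48.9".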
if (debug):
        print("temp is {0}".format(temp))  # printed only when debug is True
return temp
def fanON():
setPin(True)
return()
def fanOFF():
setPin(False)
return()
def getTEMP():
CPU_temp = float(getCPUtemperature())
if CPU_temp>maxTMP:
fanON()
elif CPU_temp<minTMP:
fanOFF()
return()
def setPin(mode):
GPIO.output(pin, mode)
return()
try:
while True:
getTEMP()
sleep(5)
except KeyboardInterrupt: # trap a CTRL+C keyboard interrupt
GPIO.cleanup() # resets all GPIO ports used by this program
|
the-stack_106_19549
|
from django.forms import ModelForm, TextInput
from django.core.exceptions import ValidationError
from .models import Paquete, Promocion
'''
PaqueteForm -> Creation, modification and validation of Paquete objects
Related class -> CD18 [Control]
Related use cases -> {BE15, BE16}
'''
class PaqueteForm(ModelForm):
class Meta:
model = Paquete
fields = '__all__'
def clean(self):
precio_base = self.cleaned_data.get('precio_base')
if precio_base < 0:
raise ValidationError("El valor del precio base debe ser un número no negativo.")
"""
Promoción ->creación, modificación de promociones
clase relacionada ->CD08 [control]
caso de Uso relacionado -> {BE17, BE18}
"""
class PromocionForm(ModelForm):
class Meta:
model = Promocion
fields = '__all__'
widgets = {
'porcentaje_descuento': TextInput(attrs={"type": "number",
"min": "0",
"max": "100"})
}
def clean(self):
descuento = self.cleaned_data.get('porcentaje_descuento')
if descuento < 0 or descuento > 100:
raise ValidationError("El valor del descuento debe ser un número de 0 a 100.")
|
the-stack_106_19550
|
"""
Private utilities.
"""
import os
from typing import Any, Callable, Mapping, Optional, cast
import pyramid.config
def get_base_path(config: pyramid.config.Configurator) -> str:
return cast(str, env_or_config(config, "C2C_BASE_PATH", "c2c.base_path", "/c2c"))
def env_or_config(
config: Optional[pyramid.config.Configurator],
env_name: Optional[str] = None,
config_name: Optional[str] = None,
default: Any = None,
type_: Callable[[str], Any] = str,
) -> Any:
return env_or_settings(
config.get_settings() if config is not None else {}, env_name, config_name, default, type_
)
def env_or_settings(
settings: Optional[Mapping[str, Any]],
env_name: Optional[str] = None,
settings_name: Optional[str] = None,
default: Any = None,
type_: Callable[[str], Any] = str,
) -> Any:
if env_name is not None and env_name in os.environ and os.environ[env_name] != "":
return type_(os.environ[env_name])
if settings is not None and settings_name is not None and settings_name in settings:
return type_(settings[settings_name])
return default
def config_bool(value: Optional[str]) -> bool:
if value is None:
return False
return value.lower() in ("true", "t", "yes", "1")
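# Example (hypothetical settings dict): with no C2C_BASE_PATH environment variable set,
# env_or_settings({"c2c.base_path": "/api/c2c"}, "C2C_BASE_PATH", "c2c.base_path", "/c2c")
# returns "/api/c2c"; config_bool("Yes") returns True and config_bool(None) returns False.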
|
the-stack_106_19551
|
"""
Return records as named tuples.
This saves a lot of memory.
"""
from collections import namedtuple
from dbfread import DBF
table = DBF('files/people.dbf', lowernames=True)
# Set record factory. This must be done after
# the table is opened because it needs the field
# names.
Record = namedtuple('Record', table.field_names)
factory = lambda lst: Record(**dict(lst))
table.recfactory = factory
for record in table:
print(record.name)
|
the-stack_106_19553
|
import numpy as np
import scipy.sparse as sp
import time
import scipy.linalg as la
class CLPerceptron():
S_X = np.array([[0, 1], [1, 0]], dtype=complex)
S_Y = np.array([[0, complex(0, -1)], [complex(0, 1), 0]], dtype=complex)
S_Z = np.array([[1, 0], [0, -1]], dtype=complex)
S = np.array([S_X, S_Y, S_Z])
def __init__(self, D, y, bias=False, manual_lookup=False):
self.D = D
self.bias = bias
self.y = y
self.n_samples = D.shape[0]
if bias:
self.add_bias()
self.dim = self.D.shape[1]
if not manual_lookup:
self._create_statistics_lookup_table()
def _create_statistics_lookup_table(self):
self.bx_lookup = np.zeros(self.n_samples)
self.qx_lookup = np.zeros(self.n_samples)
# gather statistics for each sample, store these based on index
for i in range(self.n_samples):
self.bx_lookup[i] = self._bx(self.D, self.D[i, :], self.y)
self.qx_lookup[i] = self._qx(self.D, self.D[i, :])
def train(self, max_iter, eta, calculate_loss=False, tol=10e-8, verbose=True):
_w = np.random.uniform(low=-1, high=1, size=self.dim)
_loss = []
_lh = []
_lh.append(self.likelihood(_w))
for i in range(max_iter):
h = np.dot(self.D, _w)
h_x = np.sqrt(np.square(h))
_delta_z = self.qx_lookup * (self.bx_lookup - np.tanh(h_x) * (h / h_x))
_w += eta * np.einsum(_delta_z, [0, ], self.D, [0, 1], [1, ]) # reg - 10e-10 * np.sum(_w)
_lh.append(self.likelihood(_w))
if abs(_lh[i] - _lh[i - 1]) < tol:
if verbose:
print("Convergence reached after {} steps".format(i))
self.w = _w
self.lh = _lh
self.loss = _loss
return
if verbose:
print("No convergence after {} steps!".format(max_iter))
self.w = _w
self.lh = _lh
self.loss = _loss
return
def predict(self, samples, ev=True):
def get_evalue(sample):
h = np.dot(self.w.T, sample)
p_one = 0.5 * (np.tanh(h) + 1)
return p_one, 1-p_one
# add bias if our training was done with bias
if self.bias:
samples = np.hstack([samples, np.ones(samples.shape[0]).reshape(-1, 1)])
# works similarly as calculate loss, but now returns the expectation value
p = np.apply_along_axis(get_evalue, axis=1, arr=samples)
if ev:
return p[:,0] - p[:,1]
return p[:,0], p[:,1]
def get_loss(self):
y_pred = self.predict(self.D)
loss = 0.5 * np.sum(np.absolute(y_pred - self.y))
return loss / self.n_samples
# def predict(self, _samples):
# return np.sign(np.dot(self.w, _samples.T))
def predict_sigm(self, _samples):
return self._sigmoid(np.dot(self.w, _samples.T))
    def _H_x(self, _x):
        # calculate the parameterised Hamiltonian, in the Pauli basis
        _h = np.dot(self.w.T, _x)
        _H = _h * CLPerceptron.S[2]
return _H
@staticmethod
def _bx(X, sample, y):
_idx = np.where((X == tuple(sample)).all(axis=1))[0]
return np.sum(y[_idx]) / len(_idx)
@staticmethod
def _qx(X, sample):
_idx = np.where((X == tuple(sample)).all(axis=1))[0]
return len(_idx) / X.shape[0]
def likelihood(self, _w):
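        # Log-likelihood of the data: L = sum_x q(x) * (h(x) * b(x) - log(2*cosh(h(x)))),
        # where h(x) = w.x and np.logaddexp(h, -h) = log(exp(h) + exp(-h)) = log(2*cosh(h)).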
h = np.dot(_w.T, self.D.T)
h_x = np.sqrt(np.square(h))
L = np.sum(self.qx_lookup * (h * self.bx_lookup - np.logaddexp(h_x, -h_x)))
return L
def _delta_w(self, idx):
h = np.dot(self.w.T, self.D[idx, :])
return self.qx_lookup[idx] * (self.bx_lookup[idx] - np.tanh(h))
@staticmethod
def _sigmoid(x):
return 1 / (1 + np.exp(-x))
def add_bias(self):
self.D = np.hstack([self.D, np.ones(self.n_samples).reshape(-1, 1)])
|
the-stack_106_19554
|
# MIT License
#
# Copyright (c) 2019 ABN AMRO Bank N.V.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from supporting import generalConstants
from supporting import generalSettings
from cicd.informatica import infaConstants
from cicd.informatica import infaSettings
from supporting import log
import logging
logger = logging.getLogger(__name__)
entrynr = 0
def build(**KeyWordArguments):
"""Build an IDQ command, return it as string
Process the input aruguments to compose the IDQ command
This is done by first creating a list of strings, that are then joined to form the actual
command
The syntax used is as follows:
$InfaPath + $InfaProgram + $InfaCommand + $InfaArguments
"""
procName = "build"
# Create the list that will hold the parts of the command, in order
InfaArguments = []
# Process each input argument.
# The Tool arguments are processed separately, because those have to go first
# For the other arguments, the order does not matter, so they can be processed together
for key, value in KeyWordArguments.items():
log(logger, logging.DEBUG, procName, "key =>" + key + "<.")
if isinstance(value, str):
if key.lower().__contains__("password"):
log(logger, logging.DEBUG, procName, "value =>" + "***" + "<.")
else:
log(logger, logging.DEBUG, procName, "value =>" + value + "<.")
# If the argument is "Tool" , assign the value to the variable Tool, and lookup the Program and
# Command in AvailableTools, assign those to InfaProgram, InfaCommand
if key == "Tool":
Tool = KeyWordArguments["Tool"]
(InfaProgram, InfaCommand) = infaConstants.AvailableTools[value]
elif key == "Project":
projectName = value
InfaArguments.append(infaConstants.AvailableArguments[key] + " " + '"' + value + '"')
elif key == "ExportRefData":
if value == generalConstants.YES:
InfaArguments.append(
"-oo " + '"' + "rtm:disName=" + infaSettings.sourceDIS + ",codePage=UTF-8,refDataFile=" +
generalSettings.artifactDir + "/" + projectName + ".zip" + '"')
# If the argument is anything else, look it up in AvailableArguments and add the found
# value to InfaArguments
elif key in infaConstants.AvailableArguments:
InfaArguments.append(infaConstants.AvailableArguments[key] + " " + '"' + value + '"')
elif value is not None and value != "NONE":
if key == "AsIsOptions":
new_value = value.lstrip('"').rstrip('"')
log(logger, logging.DEBUG, procName, "stripped value =>" + new_value + "<.")
InfaArguments.append(" " + new_value + " ")
elif key == "OutputFile":
InfaArguments.append(" >" + value + " ")
elif key != "OnError":
InfaArguments.append("-" + key + " " + '"' + value + '"')
# Put all parts of the command in the same list, in correct order, and join them into one
# string
IDQCommandParts = [infaSettings.sourceInfacmd, InfaProgram, InfaCommand] + InfaArguments
IDQCommand = " ".join(IDQCommandParts)
return (IDQCommand)
|
the-stack_106_19555
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the strategy class (extended from the generic_seller skill)."""
import json
from typing import Any, Dict
from packages.fetchai.skills.generic_seller.strategy import GenericStrategy
class Strategy(GenericStrategy):
"""This class defines a strategy for the agent."""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize the strategy of the agent.
:return: None
"""
self.shared_state_key = kwargs.pop("shared_state_key", None)
if self.shared_state_key is None:
raise ValueError("No shared_state_key provided!")
super().__init__(**kwargs)
def collect_from_data_source(self) -> Dict[str, str]:
"""
Build the data payload.
:return: a dict of the data found in the shared state.
"""
data = self.context.shared_state.get(self.shared_state_key, b"{}")
formatted_data = self._format_data(data)
return formatted_data
def _format_data(self, data: bytes) -> Dict[str, str]:
"""
Convert to dict.
:return: a dict with key and values as strings
"""
result: Dict[str, str] = {}
try:
loaded = json.loads(data)
if isinstance(loaded, dict) and all(
[
isinstance(key, str) and isinstance(value, str)
for key, value in loaded.items()
]
):
result = loaded
else:
result = {"data": json.dumps(loaded)}
except json.decoder.JSONDecodeError as e:
self.context.logger.warning(f"error when loading json: {e}")
return result
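    # For example, a shared-state value of b'{"temperature": "26"}' is returned
    # as {"temperature": "26"}, while a non-dict payload such as b'[1, 2]' is
    # wrapped as {"data": "[1, 2]"}.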
|
the-stack_106_19560
|
from abc import ABC
from abc import abstractmethod
from functools import wraps
import inspect
import re
import types
from typing import List
import syft
from syft.generic.pointers import PointerTensor
from syft.generic.pointers import MultiPointerTensor
from syft.generic.tensor import initialize_tensor
from syft.generic.tensor import _apply_args
from syft.workers import BaseWorker
from syft.exceptions import route_method_exception
from syft.exceptions import TensorsNotCollocatedException
class FrameworkHook(ABC):
@abstractmethod
def __init__(self, framework_module, local_worker: BaseWorker = None, is_client: bool = True):
pass
### Public API: framework-specific factory methods ###
@classmethod
@abstractmethod
def create_shape(cls, shape_dims):
"""Factory method for creating a generic FrameworkShape."""
pass
@classmethod
@abstractmethod
def create_wrapper(cls, child_to_wrap, *args, **kwargs):
"""Factory method for creating a generic FrameworkTensor wrapper."""
pass
@classmethod
@abstractmethod
def create_zeros(cls, shape, dtype, **kwargs):
"""Factory method for creating a generic zero FrameworkTensor."""
pass
### Standardized, framework-specific methods ###
@abstractmethod
def _hook_native_tensor(self, tensor_type: type, syft_type: type):
"""Add PySyft-specific tensor functionality to the given tensor type.
See framework-specific implementations for more details.
"""
# _hook_native_tensor is framework-specific, but it calls the methods
# defined below!
pass
@classmethod
@abstractmethod
def _add_methods_from_native_tensor(
cls, tensor_type: type, syft_type: type, exclude: List[str]
):
"""Adds methods from the syft_type class to the tensor_type class.
The class syft_type is a proxy class useful to avoid extending
the native tensor class directly.
Args:
tensor_type: The tensor type to which we are adding methods.
syft_type: The tensor from which we are adding methods.
exclude: A list of method names to exclude from the hooking process.
"""
# For all methods defined in syft_type which are not internal methods
# (like __class__, etc)
for attr in dir(syft_type):
if attr not in exclude:
if hasattr(tensor_type, attr):
setattr(tensor_type, f"native_{attr}", getattr(tensor_type, attr))
# Add to the native tensor this method
setattr(tensor_type, attr, getattr(syft_type, attr))
### Generics methods ###
def _hook_native_methods(self, tensor_type: type):
"""
Add hooked version of all methods of to_auto_overload[tensor_type]
to the tensor_type; instead of performing the native tensor
method, the hooked version will be called
Args:
tensor_type: the tensor_type which holds the methods
"""
# Use a pre-defined list to select the methods to overload
for attr in self.to_auto_overload[tensor_type]:
# if we haven't already overloaded this function
if f"native_{attr}" not in dir(tensor_type):
native_method = getattr(tensor_type, attr)
setattr(tensor_type, f"native_{attr}", native_method)
new_method = self._get_hooked_method(attr)
setattr(tensor_type, attr, new_method)
def _hook_properties(hook_self, tensor_type: type):
"""Overloads tensor_type properties.
If you're not sure how properties work, read:
https://www.programiz.com/python-programming/property
Args:
tensor_type: The tensor class which is having properties
added to it.
"""
@property
def location(self):
return self.child.location
tensor_type.location = location
@property
def id_at_location(self):
return self.child.id_at_location
tensor_type.id_at_location = id_at_location
@property
def id(self):
if not hasattr(self, "_syft_id"):
self._syft_id = syft.ID_PROVIDER.pop()
return self._syft_id
@id.setter
def id(self, new_syft_id):
self._syft_id = new_syft_id
return self
tensor_type.id = id
@property
def owner(self):
if not hasattr(self, "_owner"):
self._owner = hook_self.local_worker
return self._owner
@owner.setter
def owner(self, new_owner):
self._owner = new_owner
return self
tensor_type.owner = owner
@property
def is_wrapper(self):
if not hasattr(self, "_is_wrapper"):
self._is_wrapper = False
return self._is_wrapper
@is_wrapper.setter
def is_wrapper(self, it_is_a_wrapper):
self._is_wrapper = it_is_a_wrapper
return self
tensor_type.is_wrapper = is_wrapper
tensor_type.native_shape = tensor_type.shape
def dim(self):
return len(self.shape)
tensor_type.dim = dim
def _which_methods_should_we_auto_overload(self, tensor_type: type):
"""Creates a list of Torch methods to auto overload.
By default, it looks for the intersection between the methods of
tensor_type and torch_type minus those in the exception list
(syft.torch.exclude).
Args:
tensor_type: Iterate through the properties of this tensor type.
syft_type: Iterate through all attributes in this type.
Returns:
A list of methods to be overloaded.
"""
boolean_comparators = ["__gt__", "__ge__", "__lt__", "__le__"]
to_overload = boolean_comparators
native_pattern = re.compile("native*")
for attr in dir(tensor_type):
# Conditions for not overloading the method
if attr in syft.framework.exclude:
continue
if not hasattr(tensor_type, attr):
continue
lit = getattr(tensor_type, attr)
is_base = attr in dir(object)
is_desc = inspect.ismethoddescriptor(lit)
is_func = isinstance(lit, types.FunctionType)
is_overloaded = native_pattern.match(attr) is not None
if (is_desc or is_func) and not is_base and not is_overloaded:
to_overload.append(attr)
return set(to_overload)
def _hook_syft_tensor_methods(self, tensor_type: type, syft_type: type):
"""
Add hooked version of all methods of to_auto_overload[tensor_type]
to the syft_type, so that they act like regular tensors in
terms of functionality, but instead of performing the native tensor
method, it will be forwarded to each share when it is relevant
Args:
tensor_type: The tensor type to which we are adding methods.
syft_type: the syft_type which holds the methods
"""
# Use a pre-defined list to select the methods to overload
for attr in self.to_auto_overload[tensor_type]:
if attr not in dir(syft_type):
new_method = self._get_hooked_syft_method(attr)
setattr(syft_type, attr, new_method)
def _hook_pointer_tensor_methods(self, tensor_type):
"""
Add hooked version of all methods of the tensor_type to the
Pointer tensor: instead of performing the native tensor
method, it will be sent remotely to the location the pointer
is pointing at.
"""
boolean_comparators = ["__gt__", "__ge__", "__lt__", "__le__"]
# Use a pre-defined list to select the methods to overload
for attr in self.to_auto_overload[tensor_type]:
if attr not in dir(PointerTensor) or attr in boolean_comparators:
new_method = self._get_hooked_pointer_method(attr)
setattr(PointerTensor, attr, new_method)
def _hook_multi_pointer_tensor_methods(self, tensor_type):
"""
Add hooked version of all methods of the torch Tensor to the
Multi Pointer tensor: instead of performing the native tensor
method, it will be sent remotely for each pointer to the
location it is pointing at.
"""
# Use a pre-defined list to select the methods to overload
for attr in self.to_auto_overload[tensor_type]:
if attr not in dir(MultiPointerTensor):
new_method = self._get_hooked_multi_pointer_method(attr)
setattr(MultiPointerTensor, attr, new_method)
def _add_registration_to___init__(hook_self, tensor_type: type, is_tensor: bool = False):
"""Adds several attributes to the tensor.
Overload tensor_type.__init__ to add several attributes to the tensor
as well as (optionally) registering the tensor automatically.
TODO: auto-registration is disabled at the moment, this might be bad.
Args:
tensor_type: The class of the tensor being hooked
            is_tensor: An optional boolean parameter (default False) to
specify whether to skip running the native initialization
logic. TODO: this flag might never get used.
"""
if "native___init__" not in dir(tensor_type):
tensor_type.native___init__ = tensor_type.__init__
def new___init__(cls, *args, owner=None, id=None, register=True, **kwargs):
initialize_tensor(
hook_self=hook_self,
cls=cls,
id=id,
is_tensor=is_tensor,
init_args=args,
init_kwargs=kwargs,
)
tensor_type.__init__ = new___init__
def _get_hooked_syft_method(hook_self, attr):
"""
Hook a method in order to replace all args/kwargs syft/torch tensors with
their child attribute, forward this method with the new args and new self,
get response and "rebuild" the syft tensor wrapper upon all tensors found
Args:
attr (str): the method to hook
Return:
the hooked method
"""
@wraps(attr)
def overloaded_syft_method(self, *args, **kwargs):
"""
Operate the hooking
"""
# TODO: I can't manage the import issue, can you?
# Replace all syft tensor with their child attribute
new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.unwrap_args_from_method(
attr, self, args, kwargs
)
# Send it to the appropriate class and get the response
response = getattr(new_self, attr)(*new_args, **new_kwargs)
# Put back SyftTensor on the tensors found in the response
response = syft.frameworks.torch.hook_args.hook_response(
attr, response, wrap_type=type(self), wrap_args=self.get_class_attributes()
)
return response
return overloaded_syft_method
def _get_hooked_method(hook_self, method_name):
"""
Hook a method in order to replace all args/kwargs syft/torch tensors with
their child attribute if they exist
If so, forward this method with the new args and new self, get response
and "rebuild" the torch tensor wrapper upon all tensors found
        If not, just execute the native torch method
Args:
attr (str): the method to hook
Return:
the hooked method
"""
@wraps(method_name)
def overloaded_native_method(self, *args, **kwargs):
"""
Operate the hooking
"""
if not hasattr(self, "child"): # means that it's not a wrapper
method = getattr(self, f"native_{method_name}")
# Run the native function with the new args
try:
response = method(*args, **kwargs)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
else: # means that there is a wrapper to remove
try:
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
# Send the new command to the appropriate class and get the response
method = getattr(new_self, method_name)
response = method(*new_args, **new_kwargs)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(method_name):
return self
# Put back the wrappers where needed
response = syft.frameworks.torch.hook_args.hook_response(
method_name, response, wrap_type=type(self), new_self=self
)
return response
return overloaded_native_method
def _get_hooked_func(hook_self, attr):
"""
Hook a function in order to inspect its args and search for pointer
or other syft tensors.
- Calls to this function with normal tensors or numbers / string trigger
usual behaviour
- Calls with pointers send the command to the location of the pointer(s)
- Calls with syft tensor will in the future trigger specific behaviour
Args:
attr (str): the method to hook
Return:
the hooked method
"""
if attr.__module__ is None:
attr.__module__ = "torch"
cmd_name = f"{attr.__module__}.{attr.__name__}"
@wraps(attr)
def overloaded_func(*args, **kwargs):
"""
Operate the hooking
"""
try:
tensor_type = (
type(args[0]) if not isinstance(args[0], (tuple, list)) else type(args[0][0])
)
except IndexError:
tensor_type = syft.framework.Tensor
command = (cmd_name, None, args, kwargs)
try:
handle_func_command = tensor_type.handle_func_command
except AttributeError:
handle_func_command = syft.framework.Tensor.handle_func_command
response = handle_func_command(command)
return response
return overloaded_func
def _get_hooked_pointer_method(hook_self, attr):
"""
        Hook a method to send it to a remote worker
Args:
attr (str): the method to hook
Return:
the hooked method
"""
@wraps(attr)
def overloaded_pointer_method(self, *args, **kwargs):
"""
Operate the hooking
"""
pointer = self
# Get info on who needs to send where the command
owner = pointer.owner
location = pointer.location
if len(args) > 0:
if isinstance(args[0], PointerTensor):
if args[0].location.id != location.id:
raise TensorsNotCollocatedException(pointer, args[0], attr)
# Send the command
command = (attr, self, args, kwargs)
response = owner.send_command(location, command)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(attr):
return self
return response
return overloaded_pointer_method
def _get_hooked_multi_pointer_method(hook_self, attr):
"""
        Hook a method to send it to multiple remote workers
Args:
attr (str): the method to hook
Return:
the hooked method
"""
def dispatch(args, k):
return map(lambda x: x[k] if isinstance(x, dict) else x, args)
@wraps(attr)
def overloaded_attr(self, *args, **kwargs):
"""
Operate the hooking
"""
# Replace all syft tensor with their child attribute
new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.unwrap_args_from_method(
attr, self, args, kwargs
)
results = {}
for k, v in new_self.items():
results[k] = v.__getattribute__(attr)(*dispatch(new_args, k), **new_kwargs)
# Put back MultiPointerTensor on the tensors found in the response
response = syft.frameworks.torch.hook_args.hook_response(
attr, results, wrap_type=MultiPointerTensor, wrap_args=self.get_class_attributes()
)
return response
return overloaded_attr
|
the-stack_106_19561
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from status_dock.bar import Bar
CONFIG_FILE = f"{os.environ['HOME']}/.i3/status.conf.json"
if __name__ == "__main__":
import sys
try:
layout = sys.argv[1]
except IndexError:
raise ValueError("Please supply the layout as the only argument.")
b = Bar(CONFIG_FILE, layout)
b.run()
|
the-stack_106_19562
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.utils.text import slugify
from django.db import IntegrityError
from contactnetwork.cube import compute_interactions
from contactnetwork.models import *
import contactnetwork.interaction as ci
from residue.models import ResidueGenericNumber, ResidueNumberingScheme, Residue, ResidueGenericNumberEquivalent
from structure.models import (Structure, StructureType, StructureSegment, StructureStabilizingAgent,PdbData,
Rotamer, StructureSegmentModeling, StructureCoordinates, StructureCoordinatesDescription, StructureEngineering,
StructureEngineeringDescription, Fragment)
import os, time
import yaml
from interaction.views import runcalculation,parsecalculation
from multiprocessing import Queue, Process, Value, Lock
class Command(BaseCommand):
help = "Output all uniprot mappings"
update = True
purge = True
processes = 8
def prepare_input(self, proc, items, iteration=1):
q = Queue()
procs = list()
num_items = len(items)
num = Value('i', 0)
lock = Lock()
if not num_items:
return False
# make sure not to use more jobs than proteins (chunk size will be 0, which is not good)
if proc > num_items:
proc = num_items
chunk_size = int(num_items / proc)
connection.close()
for i in range(0, proc):
first = chunk_size * i
if i == proc - 1:
last = False
else:
last = chunk_size * (i + 1)
p = Process(target=self.main_func, args=([(first, last), iteration,num,lock]))
procs.append(p)
p.start()
for p in procs:
p.join()
def purge_contact_network(self):
InteractingResiduePair.truncate()
Distance.truncate()
Interaction.truncate()
def build_contact_network(self,s,pdb_code):
interacting_pairs, distances = compute_interactions(pdb_code, save_to_db=True)
def handle(self, *args, **options):
self.ss = Structure.objects.all()
self.structure_data_dir = os.sep.join([settings.DATA_DIR, 'structure_data', 'structures'])
if self.purge:
self.purge_contact_network()
print(len(self.ss),'structures')
self.prepare_input(self.processes, self.ss)
# for s in Structure.objects.all():
# self.purge_contact_network(s)
# self.build_contact_network(s,s.pdb_code.index)
def main_func(self, positions, iteration,count,lock):
# filenames
# if not positions[1]:
# filenames = self.filenames[positions[0]:]
# else:
# filenames = self.filenames[positions[0]:positions[1]]
ss = self.ss
while count.value<len(ss):
with lock:
if count.value<len(ss):
s = ss[count.value]
count.value +=1
# print(s, count.value)
else:
break
source_file_path = os.sep.join([self.structure_data_dir, s.pdb_code.index.upper() + ".yaml"])
if os.path.isfile(source_file_path):
with open(source_file_path, 'r') as f:
sd = yaml.load(f, Loader=yaml.FullLoader)
peptide_chain = ""
if 'ligand' in sd and sd['ligand'] and sd['ligand']!='None':
if isinstance(sd['ligand'], list):
ligands = sd['ligand']
else:
ligands = [sd['ligand']]
for ligand in ligands:
peptide_chain = ""
if 'chain' in ligand:
peptide_chain = ligand['chain']
# self.purge_contact_network(s)
current = time.time()
if self.update:
if Distance.objects.filter(structure=s).count():
print(s,'already done - skipping')
continue
try:
self.build_contact_network(s,s.pdb_code.index)
print(s,"Contact Network",time.time()-current)
except:
print(s,'Failed contact network')
# current = time.time()
#runcalculation(s.pdb_code.index,peptide_chain)
#parsecalculation(s.pdb_code.index,False)
#print(s,"Ligand Interactions",time.time()-current)
|
the-stack_106_19563
|
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
conn = None
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return conn
def fetch_user(conn, username):
cur = conn.cursor()
cur.execute(f'SELECT * FROM users where name = "{username}"')
rows = cur.fetchall()
for row in rows:
print(row)
def fetch_slots(conn, user_id):
cur = conn.cursor()
cur.execute(f'SELECT * FROM calender where user_id = "{user_id}"')
rows = cur.fetchall()
for row in rows:
print(row)
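# Note (suggested hardening, not part of the original script): string formatting in SQL
# is vulnerable to injection; sqlite3 supports placeholders instead, e.g.
# cur.execute("SELECT * FROM calender WHERE user_id = ?", (user_id,))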
def main():
# Enter Path where you want your database to be:
database = r"database.db"
# create a database connection => Db will be created if there does not exists one.
conn = create_connection(database)
with conn:
print("Get User: ")
fetch_user(conn, "Poornartha Sawant")
fetch_slots(conn, 1)
if __name__ == '__main__':
main()
|
the-stack_106_19565
|
import dataclasses
import numpy as np
import tensorflow as tf
def _standard_scaled_mse(std):
std = tf.constant(std, dtype=std.dtype)
def custom_loss(y_true, y_pred):
return tf.math.reduce_mean(
tf.math.reduce_mean(tf.math.square((y_pred - y_true) / std), axis=0)
)
return custom_loss
def _standard_scaled_mae(std):
std = tf.constant(std, dtype=std.dtype)
def custom_loss(y_true, y_pred):
return tf.math.reduce_mean(
tf.math.reduce_mean(tf.math.abs((y_pred - y_true) / std), axis=0)
)
return custom_loss
def _uniform_scaled_mse(std):
factor = tf.constant(1.0 / np.mean(std ** 2), dtype=std.dtype)
def custom_loss(y_true, y_pred):
return tf.math.scalar_mul(factor, tf.losses.mse(y_true, y_pred))
return custom_loss
def _uniform_scaled_mae(std):
factor = tf.constant(1.0 / np.mean(std), dtype=std.dtype)
def custom_loss(y_true, y_pred):
return tf.math.scalar_mul(factor, tf.losses.mae(y_true, y_pred))
return custom_loss
def multiply_loss_by_factor(original_loss, factor):
def loss(y_true, y_pred):
return tf.math.scalar_mul(factor, original_loss(y_true, y_pred))
return loss
@dataclasses.dataclass
class LossConfig:
"""
Attributes:
loss_type: one of "mse" or "mae"
scaling: "standard" corresponds to scaling each feature's lossby
its scale, "standard_uniform" corresponds to scaling
each feature's loss by the mean of all feature scales, where
the scale is variance for MSE loss or standard deviation
for MAE loss
weight: A scaling factor by which to modify this loss
"""
loss_type: str = "mse"
scaling: str = "standard_uniform"
weight: float = 1.0
def __post_init__(self):
if self.loss_type not in ("mse", "mae"):
raise ValueError(
f"loss_type must be 'mse' or 'mae', got '{self.loss_type}'"
)
if self.scaling not in ("standard", "standard_uniform"):
raise ValueError(
"loss_type must be 'standard' or 'standard_uniform', "
f"got '{self.scaling}'"
)
def loss(self, std: np.ndarray) -> tf.keras.losses.Loss:
"""
Returns the loss function described by the configuration.
Args:
std: standard deviation of the output features
Returns:
loss: keras loss function
"""
if self.loss_type == "mse":
if self.scaling == "standard_uniform":
loss = _uniform_scaled_mse(std)
elif self.scaling == "standard":
loss = _standard_scaled_mse(std)
elif self.loss_type == "mae":
if self.scaling == "standard_uniform":
loss = _uniform_scaled_mae(std)
elif self.scaling == "standard":
loss = _standard_scaled_mae(std)
else:
raise NotImplementedError(f"loss_type {self.loss_type} is not implemented")
if self.weight != 1.0:
loss = multiply_loss_by_factor(loss, self.weight)
return loss
|
the-stack_106_19566
|
from typing import Any, List, cast
from src.helpers.general import findInListOfDicts
from src.libs.Web3Client.exceptions import NetworkNotFound
from src.libs.Web3Client.types import NetworkConfig
from web3.middleware import geth_poa_middleware
supportedNetworks: List[NetworkConfig] = [
# Ethereum
{
"name": "Ethereum",
"txType": 1,
"chainId": 1,
"middlewares": [],
},
# Avalanche C Chain
{
"name": "Avalanche",
"txType": 2,
"chainId": 43114,
"middlewares": [geth_poa_middleware],
},
# Swimmer Network Avalanche subnet
{
"name": "SwimmerNetwork",
"txType": 1,
"chainId": 73772,
"middlewares": [geth_poa_middleware],
},
]
def getNetworkConfig(networkName: str) -> NetworkConfig:
"""
Return the configuration for the network with the given
name; raises an exception if not found
"""
network: NetworkConfig = findInListOfDicts(
cast(Any, supportedNetworks), "name", networkName
)
if network is None:
raise NetworkNotFound(f"Network '{networkName}' not supported")
return network
def isNetworkSupported(networkName: str) -> bool:
"""
Return true if the given network is supported by the client
"""
try:
getNetworkConfig(networkName)
return True
except NetworkNotFound:
return False
|
the-stack_106_19567
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_svm
short_description: Manage NetApp ONTAP svm
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team ([email protected])
description:
- Create, modify or delete svm on NetApp ONTAP
options:
state:
description:
- Whether the specified SVM should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- The name of the SVM to manage.
required: true
from_name:
description:
- Name of the SVM to be renamed
version_added: '2.7'
root_volume:
description:
- Root volume of the SVM.
- Cannot be modified after creation.
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
- Cannot be modified after creation.
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create,
this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call,
this will return the list of matching Vservers.
- The 'unified' security style, which applies only to Infinite Volumes,
cannot be applied to a Vserver's root volume.
- Cannot be modified after creation.
choices: ['unix', 'ntfs', 'mixed', 'unified']
allowed_protocols:
description:
- Allowed Protocols.
- When specified as part of a vserver-create,
this field represent the list of protocols allowed on the Vserver.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the protocols specified
as part of the allowed-protocols.
- When part of vserver-modify,
this field should include the existing list
along with new protocol list to be added to prevent data disruptions.
- Possible values
- nfs NFS protocol,
- cifs CIFS protocol,
- fcp FCP protocol,
- iscsi iSCSI protocol,
- ndmp NDMP protocol,
- http HTTP protocol,
- nvme NVMe protocol
aggr_list:
description:
- List of aggregates assigned for volume operations.
- These aggregates could be shared for use with other Vservers.
- When specified as part of a vserver-create,
this field represents the list of aggregates
that are assigned to the Vserver for volume operations.
- When part of vserver-get-iter call,
this will return the list of Vservers
which have any of the aggregates specified as part of the aggr-list.
ipspace:
description:
- IPSpace name
- Cannot be modified after creation.
version_added: '2.7'
snapshot_policy:
description:
- Default snapshot policy setting for all volumes of the Vserver.
This policy will be assigned to all volumes created in this
Vserver unless the volume create request explicitly provides a
snapshot policy or volume is modified later with a specific
snapshot policy. A volume-level snapshot policy always overrides
the default Vserver-wide snapshot policy.
version_added: '2.7'
language:
description:
- Language to use for the SVM
- Default to C.UTF-8
- Possible values Language
- c POSIX
- ar Arabic
- cs Czech
- da Danish
- de German
- en English
- en_us English (US)
- es Spanish
- fi Finnish
- fr French
- he Hebrew
- hr Croatian
- hu Hungarian
- it Italian
- ja Japanese euc-j
- ja_v1 Japanese euc-j
- ja_jp.pck Japanese PCK (sjis)
- ja_jp.932 Japanese cp932
- ja_jp.pck_v2 Japanese PCK (sjis)
- ko Korean
- no Norwegian
- nl Dutch
- pl Polish
- pt Portuguese
- ro Romanian
- ru Russian
- sk Slovak
- sl Slovenian
- sv Swedish
- tr Turkish
- zh Simplified Chinese
- zh.gbk Simplified Chinese (GBK)
- zh_tw Traditional Chinese euc-tw
- zh_tw.big5 Traditional Chinese Big 5
version_added: '2.7'
subtype:
description:
- The subtype for vserver to be created.
- Cannot be modified after creation.
choices: ['default', 'dp_destination', 'sync_source', 'sync_destination']
version_added: '2.7'
'''
EXAMPLES = """
- name: Create SVM
na_ontap_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=[
'present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['unix',
'ntfs',
'mixed',
'unified'
]),
allowed_protocols=dict(type='list'),
aggr_list=dict(type='list'),
ipspace=dict(type='str', required=False),
snapshot_policy=dict(type='str', required=False),
language=dict(type='str', required=False),
subtype=dict(choices=['default', 'dp_destination', 'sync_source', 'sync_destination'])
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.from_name = p['from_name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
self.allowed_protocols = p['allowed_protocols']
self.aggr_list = p['aggr_list']
self.language = p['language']
self.ipspace = p['ipspace']
self.snapshot_policy = p['snapshot_policy']
self.subtype = p['subtype']
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_vserver(self, vserver_name=None):
"""
Checks if vserver exists.
:return:
vserver object if vserver found
None if vserver is not found
:rtype: object/None
"""
if vserver_name is None:
vserver_name = self.name
vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-info', **{'vserver-name': vserver_name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = self.server.invoke_successfully(vserver_info,
enable_tunneling=False)
vserver_details = None
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
attributes_list = result.get_child_by_name('attributes-list')
vserver_info = attributes_list.get_child_by_name('vserver-info')
aggr_list = list()
''' vserver aggr-list can be empty by default'''
get_list = vserver_info.get_child_by_name('aggr-list')
if get_list is not None:
aggregates = get_list.get_children()
for aggr in aggregates:
aggr_list.append(aggr.get_content())
protocols = list()
'''allowed-protocols is not empty by default'''
get_protocols = vserver_info.get_child_by_name(
'allowed-protocols').get_children()
for protocol in get_protocols:
protocols.append(protocol.get_content())
vserver_details = {'name': vserver_info.get_child_content('vserver-name'),
'root_volume': vserver_info.get_child_content('root-volume'),
'root_volume_aggregate': vserver_info.get_child_content('root-volume-aggregate'),
'root_volume_security_style': vserver_info.get_child_content('root-volume-security-style'),
'subtype': vserver_info.get_child_content('vserver-subtype'),
'aggr_list': aggr_list,
'language': vserver_info.get_child_content('language'),
'snapshot_policy': vserver_info.get_child_content('snapshot-policy'),
'allowed_protocols': protocols}
return vserver_details
def create_vserver(self):
options = {'vserver-name': self.name, 'root-volume': self.root_volume}
if self.root_volume_aggregate is not None:
options['root-volume-aggregate'] = self.root_volume_aggregate
if self.root_volume_security_style is not None:
options['root-volume-security-style'] = self.root_volume_security_style
if self.language is not None:
options['language'] = self.language
if self.ipspace is not None:
options['ipspace'] = self.ipspace
if self.snapshot_policy is not None:
options['snapshot-policy'] = self.snapshot_policy
if self.subtype is not None:
options['vserver-subtype'] = self.subtype
vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-create', **options)
try:
self.server.invoke_successfully(vserver_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def delete_vserver(self):
vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-destroy', **{'vserver-name': self.name})
try:
self.server.invoke_successfully(vserver_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting SVM %s \
with root volume %s on aggregate %s: %s'
% (self.name, self.root_volume,
self.root_volume_aggregate, to_native(e)),
exception=traceback.format_exc())
def rename_vserver(self):
vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-rename', **{'vserver-name': self.from_name,
'new-name': self.name})
try:
self.server.invoke_successfully(vserver_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def modify_vserver(self, allowed_protocols, aggr_list, language, snapshot_policy):
options = {'vserver-name': self.name}
if language:
options['language'] = self.language
if snapshot_policy:
options['snapshot-policy'] = self.snapshot_policy
vserver_modify = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-modify', **options)
if allowed_protocols:
allowed_protocols = netapp_utils.zapi.NaElement(
'allowed-protocols')
for protocol in self.allowed_protocols:
allowed_protocols.add_new_child('protocol', protocol)
vserver_modify.add_child_elem(allowed_protocols)
if aggr_list:
aggregates = netapp_utils.zapi.NaElement('aggr-list')
for aggr in self.aggr_list:
aggregates.add_new_child('aggr-name', aggr)
vserver_modify.add_child_elem(aggregates)
try:
self.server.invoke_successfully(vserver_modify,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error modifying SVM %s: %s'
% (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
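        # Compare the requested state/attributes with the existing SVM and
        # decide whether to create, rename, modify or delete it; check_mode
        # skips the actual ZAPI calls.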
changed = False
vserver_details = self.get_vserver()
        # These are being commented out as part of bugfix 595.
# if vserver_details is not None:
# results = netapp_utils.get_cserver(self.server)
# cserver = netapp_utils.setup_ontap_zapi(
# module=self.module, vserver=results)
# netapp_utils.ems_log_event("na_ontap_svm", cserver)
rename_vserver = False
modify_protocols = False
modify_aggr_list = False
modify_snapshot_policy = False
modify_language = False
if vserver_details is not None:
if self.state == 'absent':
changed = True
elif self.state == 'present':
# SVM is present, is it a modify?
if self.allowed_protocols is not None:
self.allowed_protocols.sort()
vserver_details['allowed_protocols'].sort()
if self.allowed_protocols != vserver_details['allowed_protocols']:
modify_protocols = True
changed = True
if self.aggr_list is not None:
self.aggr_list.sort()
vserver_details['aggr_list'].sort()
if self.aggr_list != vserver_details['aggr_list']:
modify_aggr_list = True
changed = True
if self.snapshot_policy is not None:
if self.snapshot_policy != vserver_details['snapshot_policy']:
modify_snapshot_policy = True
changed = True
if self.language is not None:
if self.language != vserver_details['language']:
modify_language = True
changed = True
if self.root_volume is not None and self.root_volume != vserver_details['root_volume']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume'))
if self.root_volume_aggregate is not None and self.root_volume_aggregate != vserver_details['root_volume_aggregate']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume aggregate'))
if self.root_volume_security_style is not None and self.root_volume_security_style != vserver_details['root_volume_security_style']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change root volume security style'))
if self.subtype is not None and self.subtype != vserver_details['subtype']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change subtype'))
if self.ipspace is not None and self.ipspace != vserver_details['ipspace']:
self.module.fail_json(msg='Error modifying SVM %s: %s' % (self.name, 'cannot change ipspace'))
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if vserver_details is None:
# create or rename
if self.from_name is not None and self.get_vserver(self.from_name):
self.rename_vserver()
else:
self.create_vserver()
else:
                        if modify_protocols or modify_aggr_list or modify_language or modify_snapshot_policy:
self.modify_vserver(
modify_protocols, modify_aggr_list, modify_language, modify_snapshot_policy)
elif self.state == 'absent':
self.delete_vserver()
self.module.exit_json(changed=changed)
def main():
v = NetAppOntapSVM()
v.apply()
if __name__ == '__main__':
main()
|
the-stack_106_19568
|
import pickle
from typing import IO, Optional
import numpy as np
from ..definitions import (DenseMatrix, DenseScoreArray, InteractionMatrix,
UserIndexArray)
from ._ials import IALSLearningConfigBuilder
from ._ials import IALSTrainer as CoreTrainer
from .base import (BaseRecommenderWithItemEmbedding,
BaseRecommenderWithThreadingSupport,
BaseRecommenderWithUserEmbedding)
from .base_earlystop import BaseRecommenderWithEarlyStopping, TrainerBase
class IALSTrainer(TrainerBase):
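    """Thin wrapper around the compiled IALS core trainer: builds the learning
    config, steps one ALS epoch at a time, and (de)serialises the user/item
    factor matrices for early stopping."""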
def __init__(
self,
X: InteractionMatrix,
n_components: int,
alpha: float,
reg: float,
init_std: float,
n_thread: int,
):
        X_all_f32 = X.astype(np.float32)
config = (
IALSLearningConfigBuilder()
.set_K(n_components)
.set_init_stdev(init_std)
.set_alpha(alpha)
.set_reg(reg)
.set_n_threads(n_thread)
.build()
)
self.core_trainer = CoreTrainer(config, X_all_f32)
def load_state(self, ifs: IO) -> None:
params = pickle.load(ifs)
self.core_trainer.user = params["user"]
self.core_trainer.item = params["item"]
def save_state(self, ofs: IO) -> None:
pickle.dump(
dict(user=self.core_trainer.user, item=self.core_trainer.item),
ofs,
protocol=pickle.HIGHEST_PROTOCOL,
)
def run_epoch(self) -> None:
self.core_trainer.step()
class IALSRecommender(
BaseRecommenderWithEarlyStopping,
BaseRecommenderWithThreadingSupport,
BaseRecommenderWithUserEmbedding,
BaseRecommenderWithItemEmbedding,
):
"""
Implicit Alternating Least Squares (IALS).
See:
Y. Hu, Y. Koren and C. Volinsky, Collaborative filtering for implicit feedback datasets, ICDM 2008.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.167.5120&rep=rep1&type=pdf
"""
def __init__(
self,
X_all: InteractionMatrix,
n_components: int = 20,
alpha: float = 1.0,
reg: float = 1e-3,
init_std: float = 0.1,
validate_epoch: int = 5,
score_degradation_max: int = 5,
n_thread: Optional[int] = 1,
max_epoch: int = 300,
):
super().__init__(
X_all,
max_epoch=max_epoch,
validate_epoch=validate_epoch,
score_degration_max=score_degradation_max,
n_thread=n_thread,
)
self.n_components = n_components
self.alpha = alpha
self.reg = reg
self.init_std = init_std
self.trainer: Optional[IALSTrainer] = None
def create_trainer(self) -> TrainerBase:
return IALSTrainer(
self.X_all,
self.n_components,
self.alpha,
self.reg,
self.init_std,
self.n_thread,
)
def get_score(self, index: UserIndexArray) -> DenseScoreArray:
if self.trainer is None:
raise RuntimeError("'get_score' called before training")
return self.trainer.core_trainer.user[index].dot(
self.trainer.core_trainer.item.T
)
def get_score_block(self, begin: int, end: int) -> DenseScoreArray:
if self.trainer is None:
raise RuntimeError("'get_score_block' called before training")
return self.trainer.core_trainer.user_scores(begin, end)
def get_score_cold_user(self, X: InteractionMatrix) -> DenseScoreArray:
if self.trainer is None:
raise RuntimeError("'get_score_cols_user' called before training")
user_vector = self.trainer.core_trainer.transform_user(
X.astype(np.float32).tocsr()
)
return user_vector.dot(self.trainer.core_trainer.item.T).astype(np.float64)
def get_user_embedding(self) -> DenseMatrix:
if self.trainer is None:
raise RuntimeError("'get_user_embedding' called before training")
return self.trainer.core_trainer.user.astype(np.float64)
def get_score_from_user_embedding(
self, user_embedding: DenseMatrix
) -> DenseScoreArray:
if self.trainer is None:
raise RuntimeError("'get_score_from_user_embedding' called before training")
return user_embedding.dot(self.trainer.core_trainer.item.T)
def get_item_embedding(self) -> DenseMatrix:
if self.trainer is None:
raise RuntimeError("'get_item_embedding' called before training")
return self.trainer.core_trainer.item.astype(np.float64)
def get_score_from_item_embedding(
self, user_indices: UserIndexArray, item_embedding: DenseMatrix
) -> DenseScoreArray:
if self.trainer is None:
raise RuntimeError("'get_score_from_item_embedding' called before training")
return (
self.trainer.core_trainer.user[user_indices]
.dot(item_embedding.T)
.astype(np.float64)
)
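# Illustrative usage sketch (not part of the library; assumes `X` is a
# scipy.sparse user-item interaction matrix and that the early-stopping base
# class exposes the usual fit entry point, assumed here to be `learn()`):
#
#     rec = IALSRecommender(X, n_components=64, alpha=1.0, reg=1e-3)
#     rec.learn()
#     scores = rec.get_score(np.arange(10))  # dense scores for the first 10 users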
|
the-stack_106_19571
|
from freefall.falling_objects import frc_power_cell
from freefall.simulators import simulate_earth_surface
from freefall.simulators import terminate_vy_less_zero
from freefall.utilities import find_vx_vy, float_range
import matplotlib.pyplot as plt
X_INITIAL = 0 # m
Y_INITIAL = 27 / 40 # m
SPEED = 5 # m/s
ANGLE = 50 # degrees
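# find_vx_vy presumably resolves the launch speed into horizontal/vertical
# components (vx = speed*cos(angle), vy = speed*sin(angle), with angle in degrees).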
fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
# Plot the trajectory over several values of speed
ax0.set(
title="Power Cell Trajectory by Speed", xlabel="Distance (m)", ylabel="Height (m)"
)
ax0.grid()
for i in float_range(2, 8, 0.1):
# run the simulation
vx_initial, vy_initial = find_vx_vy(speed=i, angle=ANGLE)
results = simulate_earth_surface(
frc_power_cell,
X_INITIAL,
Y_INITIAL,
vx_initial,
vy_initial,
terminator=terminate_vy_less_zero,
)
# Plot the results
ax0.plot(results.x, results.y)
# Plot the trajectory over several values of angle
ax1.set(title="Power Cell Trajectory by Angle", xlabel="Distance (m)")
ax1.grid()
for i in float_range(10, 90, 2):
# run the simulation
vx_initial, vy_initial = find_vx_vy(speed=SPEED, angle=i)
results = simulate_earth_surface(
frc_power_cell,
X_INITIAL,
Y_INITIAL,
vx_initial,
vy_initial,
terminator=terminate_vy_less_zero,
)
# Plot the results
ax1.plot(results.x, results.y)
# Display the graph
plt.show()
|
the-stack_106_19572
|
#
# Copyright (c) 2017 NORDUnet A/S
# Copyright (c) 2018 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from flask import request
from eduid_common.authn import fido_tokens
from eduid_common.session import session
from eduid_webapp.actions.action_abc import ActionPlugin
from eduid_webapp.actions.app import ActionsApp
from eduid_webapp.actions.app import current_actions_app as current_app
from eduid_webapp.actions.helpers import ActionsMsg
__author__ = 'ft'
class Plugin(ActionPlugin):
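    """MFA action plugin: builds the frontend config for a second-factor
    prompt and verifies either a U2F/Webauthn token response or the result of
    an external MFA service (via the eIDAS proxy), with an optional test mode."""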
PLUGIN_NAME = 'mfa'
PACKAGE_NAME = 'eduid_webapp.actions.actions.mfa'
steps = 1
@classmethod
def includeme(cls, app: ActionsApp):
if not app.conf.eidas_url:
app.logger.error(f'The configuration option eidas_url is required with plugin MFA')
if not app.conf.mfa_authn_idp:
app.logger.error(f'The configuration option mfa_authn_idp is required with plugin MFA')
app.conf.mfa_testing = False
def get_config_for_bundle(self, action):
eppn = action.eppn
user = current_app.central_userdb.get_user_by_eppn(eppn, raise_on_missing=False)
current_app.logger.debug('Loaded User {} from db'.format(user))
if not user:
raise self.ActionError(ActionsMsg.user_not_found)
config = fido_tokens.start_token_verification(user, self.PACKAGE_NAME, current_app.conf.fido2_rp_id)
# Explicit check for boolean True
if current_app.conf.mfa_testing is True:
current_app.logger.info('MFA test mode is enabled')
config['testing'] = True
else:
config['testing'] = False
# Add config for external mfa auth
config['eidas_url'] = current_app.conf.eidas_url
config['mfa_authn_idp'] = current_app.conf.mfa_authn_idp
return config
def perform_step(self, action):
current_app.logger.debug('Performing MFA step')
if current_app.conf.mfa_testing:
current_app.logger.debug('Test mode is on, faking authentication')
return {
'success': True,
'testing': True,
}
eppn = action.eppn
user = current_app.central_userdb.get_user_by_eppn(eppn, raise_on_missing=False)
current_app.logger.debug('Loaded User {} from db (in perform_action)'.format(user))
# Third party service MFA
if session.mfa_action.success is True: # Explicit check that success is the boolean True
issuer = session.mfa_action.issuer
authn_instant = session.mfa_action.authn_instant
authn_context = session.mfa_action.authn_context
current_app.logger.info('User {} logged in using external mfa service {}'.format(user, issuer))
action.result = {
'success': True,
'issuer': issuer,
'authn_instant': authn_instant,
'authn_context': authn_context,
}
current_app.actions_db.update_action(action)
# Clear mfa_action from session
del session.mfa_action
return action.result
req_json = request.get_json()
if not req_json:
current_app.logger.error('No data in request to authn {}'.format(user))
raise self.ActionError(ActionsMsg.no_data)
# Process POSTed data
if 'tokenResponse' in req_json:
# CTAP1/U2F
token_response = request.get_json().get('tokenResponse', '')
current_app.logger.debug('U2F token response: {}'.format(token_response))
challenge = session.get(self.PACKAGE_NAME + '.u2f.challenge')
current_app.logger.debug('Challenge: {!r}'.format(challenge))
result = fido_tokens.verify_u2f(user, challenge, token_response, current_app.conf.u2f_valid_facets)
if result is not None:
action.result = result
current_app.actions_db.update_action(action)
return action.result
elif 'authenticatorData' in req_json:
# CTAP2/Webauthn
try:
result = fido_tokens.verify_webauthn(user, req_json, self.PACKAGE_NAME, current_app.conf.fido2_rp_id)
except fido_tokens.VerificationProblem as exc:
raise self.ActionError(exc.msg)
action.result = result
current_app.actions_db.update_action(action)
return action.result
else:
current_app.logger.error('Neither U2F nor Webauthn data in request to authn {}'.format(user))
current_app.logger.debug('Request: {}'.format(req_json))
raise self.ActionError(ActionsMsg.no_response)
raise self.ActionError(ActionsMsg.unknown_token)
|
the-stack_106_19573
|
# Authors: Robert Luke <[email protected]>
# Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import pytest
import numpy as np
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx, BaseRaw, read_raw_fif
from mne.preprocessing.nirs import optical_density, beer_lambert_law
from mne.utils import _validate_type
from mne.datasets import testing
from mne.externals.pymatreader import read_mat
fname_nirx_15_0 = op.join(data_path(download=False),
'NIRx', 'nirx_15_0_recording')
fname_nirx_15_2 = op.join(data_path(download=False),
'NIRx', 'nirx_15_2_recording')
fname_nirx_15_2_short = op.join(data_path(download=False),
'NIRx', 'nirx_15_2_recording_w_short')
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2,
fname_nirx_15_0]))
@pytest.mark.parametrize('fmt', ('nirx', 'fif'))
def test_beer_lambert(fname, fmt, tmpdir):
"""Test converting NIRX files."""
assert fmt in ('nirx', 'fif')
raw = read_raw_nirx(fname)
if fmt == 'fif':
raw.save(tmpdir.join('test_raw.fif'))
raw = read_raw_fif(tmpdir.join('test_raw.fif'))
assert 'fnirs_raw' in raw
assert 'fnirs_od' not in raw
raw = optical_density(raw)
_validate_type(raw, BaseRaw, 'raw')
assert 'fnirs_raw' not in raw
assert 'fnirs_od' in raw
raw = beer_lambert_law(raw)
_validate_type(raw, BaseRaw, 'raw')
assert 'fnirs_raw' not in raw
assert 'fnirs_od' not in raw
assert 'hbo' in raw
assert 'hbr' in raw
@testing.requires_testing_data
def test_beer_lambert_unordered_errors():
"""NIRS data requires specific ordering and naming of channels."""
raw = read_raw_nirx(fname_nirx_15_0)
raw_od = optical_density(raw)
raw_od.pick([0, 1, 2])
with pytest.raises(ValueError, match='ordered'):
beer_lambert_law(raw_od)
# Test that an error is thrown if channel naming frequency doesn't match
# what is stored in loc[9], which should hold the light frequency too.
raw_od = optical_density(raw)
raw_od.rename_channels({'S2_D2 760': 'S2_D2 770'})
with pytest.raises(ValueError, match='frequency do not match'):
beer_lambert_law(raw_od)
# Test that an error is thrown if inconsistent frequencies used in data
raw_od.info['chs'][2]['loc'][9] = 770.0
with pytest.raises(ValueError, match='pairs with frequencies'):
beer_lambert_law(raw_od)
@testing.requires_testing_data
def test_beer_lambert_v_matlab():
"""Compare MNE results to MATLAB toolbox."""
raw = read_raw_nirx(fname_nirx_15_0)
raw = optical_density(raw)
raw = beer_lambert_law(raw, ppf=0.121)
raw._data *= 1e6 # Scale to uM for comparison to MATLAB
matlab_fname = op.join(data_path(download=False),
'NIRx', 'validation', 'nirx_15_0_recording_bl.mat')
matlab_data = read_mat(matlab_fname)
for idx in range(raw.get_data().shape[0]):
mean_error = np.mean(matlab_data['data'][:, idx] -
raw._data[idx])
assert mean_error < 0.1
matlab_name = ("S" + str(int(matlab_data['sources'][idx])) +
"_D" + str(int(matlab_data['detectors'][idx])) +
" " + matlab_data['type'][idx])
assert raw.info['ch_names'][idx] == matlab_name
|
the-stack_106_19575
|
import hydra
import os
import torch
from tqdm import tqdm, trange
from tacotron.utils import reset_logging, set_seed, get_abspath, ResultWriter
from tacotron import get_process, get_model, get_vocgan
from tacotron.configs import NonAttentiveTacotronConfig
from hydra.core.config_store import ConfigStore
import logging
from transformers import (
get_linear_schedule_with_warmup,
)
from tacotron.vocgan_generator import Generator
import soundfile as sf
def init():
cs = ConfigStore.instance()
## base
cs.store(group="base", name='non_taco', node=NonAttentiveTacotronConfig)
@hydra.main(config_path=os.path.join(".", "configs"), config_name="train")
def main(cfg):
## Resent Logging
reset_logging()
args = cfg.base
## GPU setting
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
set_seed(args.seed)
## load_dataset
processor = get_process(args)(args)
tokenizer = processor.build_tokenizer()
train_dataset = processor.get_dataset(tokenizer, split='train')
valid_dataset = processor.get_dataset(tokenizer, split='valid')
args.num_labels = tokenizer.get_num_labels()
args.num_special_labels = tokenizer.get_num_special_labels()
model = get_model(args)(args)
model.to(args.device)
generator = None
generator_path = get_abspath(args.generator_path)
if os.path.exists(generator_path):
generator = get_vocgan(generator_path)
generator.to(args.device)
generator.eval()
## test
# train_dataloader = train_dataset.load_dataloader(
# shuffle=False, batch_size=2
# )
# batch = train_dataset.__getitem__(2)
# batch = {key: (item.to(args.device) if type(item) == torch.Tensor else item) for key, item in batch.items()}
# print(batch)
#
# print(tokenizer.decode(batch['input_ids'], batch['special_input_ids']))
#
# print("shape", batch['mel_specs'].shape)
# print(batch['mel_specs'])
#
# audio = generator.generate_audio(batch['mel_specs'])
# sf.write('sample1.wav', audio, args.sampling_rate, 'PCM_24')
#
# for batch in train_dataloader:
# print('confirm1', tokenizer.decode(batch['input_ids'][0], batch['special_input_ids'][0]))
# print('confirm2', tokenizer.decode(batch['input_ids'][1], batch['special_input_ids'][1]))
#
# batch = {key: (item.to(args.device) if type(item) == torch.Tensor else item) for key, item in batch.items()}
#
# #print(batch['durations'])
# #print(batch['input_ids'])
# mel_specs = batch['mel_specs'].squeeze()
# print("shape", mel_specs.shape)
# print(mel_specs)
#
# #net_output = model(**batch)
#
# ## generate audio
# audio = generator.generate_audio(mel_specs[0])
# ## save audio
# sf.write('sample1.wav', audio, args.sampling_rate, 'PCM_24')
#
#
# #loss = model.get_loss(**net_output, **batch)
# #print('loss', loss)
# break
#
## train model
writer = ResultWriter(args.experiments_path)
results = {}
## training
train_results = train(args, train_dataset, valid_dataset, model, tokenizer, generator)
results.update(**train_results)
writer.update(args, **results)
def train(args, train_dataset, valid_dataset, model, tokenizer, generator=None):
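    # Standard training loop: gradient accumulation, optional apex fp16,
    # linear LR warmup/decay, periodic evaluation (with audio synthesis when a
    # vocoder generator is available) and periodic checkpointing.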
logging.info("start training")
## load dataloader
train_dataloader = train_dataset.load_dataloader(
shuffle=True, batch_size=args.train_batch_size
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
)
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, eps=args.weight_decay)
args.warmup_steps = int(args.warmup_percent * t_total)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
logging.info(" Num Epochs = %d", args.num_train_epochs)
logging.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logging.info(" Total optimization steps = %d", t_total)
logging.info(" Train Batch size = %d", args.train_batch_size)
logging.info(" Train Data size = %d", len(train_dataset))
step = 0
global_step = 0
best_loss = 1e10
best_loss_step = 0
stop_iter = False
#train_iterator = trange(0, int(args.num_train_epochs), desc="Epoch")
model.zero_grad()
for epoch_idx in range(0, int(args.num_train_epochs)):
## load dataloader
train_dataloader = train_dataset.load_dataloader(
shuffle=True, batch_size=args.train_batch_size
)
for batch in train_dataloader:
step += 1
model.train()
batch = {key: (item.to(args.device) if type(item) == torch.Tensor else item) for key, item in batch.items()}
net_output = model(**batch)
loss = model.get_loss(**net_output, **batch)
final_loss = loss['loss']
if args.gradient_accumulation_steps > 1:
final_loss = final_loss / args.gradient_accumulation_steps
if args.n_gpu > 1:
final_loss = final_loss.mean()
if args.fp16:
with amp.scale_loss(final_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
final_loss.backward()
#train_iterator.set_postfix_str(s="loss = {:.8f}".format(float(final_loss)), refresh=True)
if step % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.grad_clip_thresh)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_thresh)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
## logging
if (args.logging_steps > 0 and global_step % args.logging_steps == 0 and global_step > 0):
logging.info("***** epoch [{}] loss [{:.4}] b_mse [{:.4}] b_mae [{:.4}] a_mse [{:.4}] a_mae [{:.4}] duration [{:.4}] *****".format(
str(epoch_idx),
loss['loss'].detach().cpu().item(),
loss['predicted_mse'].detach().cpu().item(),
loss['predicted_mae'].detach().cpu().item(),
loss['predicted_postnet_mse'].detach().cpu().item(),
loss['predicted_postnet_mae'].detach().cpu().item(),
loss['predicted_durations_mse'].detach().cpu().item(),
)
)
if (args.steps_per_evaluate > 0 and global_step % args.steps_per_evaluate == 0 and global_step > 0):
## audio prepare path
audio_save_path = os.path.join(args.save_path, 'audio', str(global_step))
audio_save_path = get_abspath(audio_save_path)
os.makedirs(audio_save_path, exist_ok=True)
# if (args.logging_steps > 0 and global_step % args.logging_steps == 0):
results = evaluate(args, valid_dataset, model, generator, audio_save_path)
eval_loss = results['loss']
if eval_loss < best_loss:
best_loss = eval_loss
best_loss_step = global_step
logging.info("***** best_loss : %.4f *****", best_loss)
if (args.steps_per_checkpoint > 0 and global_step % args.steps_per_checkpoint == 0 and global_step > 0):
model_save_path = os.path.join(args.save_path, 'model', str(global_step))
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(model_save_path)
tokenizer.save_pretrained(model_save_path)
if args.max_steps > 0 and global_step > args.max_steps:
stop_iter = True
break
if stop_iter:
break
return {'best_valid_loss': best_loss,
'best_valid_loss_step': best_loss_step,
}
def evaluate(args, test_dataset, model, generator=None, save_path=''):
## load dataloader
test_dataloader = test_dataset.load_dataloader(
shuffle=False, batch_size=1
)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
logging.info("***** Running evaluation *****")
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(test_dataloader, desc="Evaluating"):
batch = {key: (item.to(args.device) if type(item) == torch.Tensor else item) for key, item in batch.items()}
with torch.no_grad():
net_output = model(**batch)
loss = model.get_loss(**net_output, **batch)
eval_loss += loss['loss'].cpu().item()
if generator is not None:
net_output = model.inference(**batch)
## generate audio
audio = generator.generate_audio(**net_output)
## save audio
sf.write(os.path.join(save_path, '{}.wav'.format(str(nb_eval_steps))), audio, args.sampling_rate, 'PCM_24')
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
results = {
'loss' : eval_loss,
}
logging.info(" %s = %s", 'loss', str(results['loss']))
model.train()
return results
if __name__ == "__main__":
init()
main()
|
the-stack_106_19577
|
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
# C
"drivers/ninaw10/*.[ch]",
"extmod/*.[ch]",
"extmod/btstack/*.[ch]",
"extmod/nimble/*.[ch]",
"lib/mbedtls_errors/tester.c",
"shared/netutils/*.[ch]",
"shared/timeutils/*.[ch]",
"shared/runtime/*.[ch]",
"mpy-cross/*.[ch]",
"ports/*/*.[ch]",
"ports/windows/msvc/**/*.[ch]",
"ports/nrf/modules/nrf/*.[ch]",
"py/*.[ch]",
# Python
"drivers/**/*.py",
"examples/**/*.py",
"extmod/**/*.py",
"ports/**/*.py",
"ports/mimxrt/**/*.[ch]",
"py/**/*.py",
"tools/**/*.py",
"tests/**/*.py",
]
EXCLUSIONS = [
# STM32 build includes generated Python code.
"ports/*/build*",
# gitignore in ports/unix ignores *.py, so also do it here.
"ports/unix/*.py",
# not real python files
"tests/**/repl_*.py",
# needs careful attention before applying automatic formatting
"tests/basics/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
C_EXTS = (
".c",
".h",
)
PY_EXTS = (".py",)
def list_files(paths, exclusions=None, prefix=""):
files = set()
for pattern in paths:
files.update(glob.glob(os.path.join(prefix, pattern), recursive=True))
for pattern in exclusions or []:
files.difference_update(glob.fnmatch.filter(files, os.path.join(prefix, pattern)))
return sorted(files)
def fixup_c(filename):
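    # Post-process uncrustify output: re-indent preprocessor #if/#else/#endif
    # blocks so they match the indentation of the code that follows them.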
# Read file.
with open(filename) as f:
lines = f.readlines()
# Write out file with fixups.
with open(filename, "w", newline="") as f:
dedent_stack = []
while lines:
# Get next line.
l = lines.pop(0)
# Dedent #'s to match indent of following line (not previous line).
m = re.match(r"( +)#(if |ifdef |ifndef |elif |else|endif)", l)
if m:
indent = len(m.group(1))
directive = m.group(2)
if directive in ("if ", "ifdef ", "ifndef "):
l_next = lines[0]
indent_next = len(re.match(r"( *)", l_next).group(1))
if indent - 4 == indent_next and re.match(r" +(} else |case )", l_next):
# This #-line (and all associated ones) needs dedenting by 4 spaces.
l = l[4:]
dedent_stack.append(indent - 4)
else:
# This #-line does not need dedenting.
dedent_stack.append(-1)
else:
if dedent_stack[-1] >= 0:
# This associated #-line needs dedenting to match the #if.
indent_diff = indent - dedent_stack[-1]
assert indent_diff >= 0
l = l[indent_diff:]
if directive == "endif":
dedent_stack.pop()
# Write out line.
f.write(l)
assert not dedent_stack, filename
def main():
cmd_parser = argparse.ArgumentParser(description="Auto-format C and Python files.")
cmd_parser.add_argument("-c", action="store_true", help="Format C code only")
cmd_parser.add_argument("-p", action="store_true", help="Format Python code only")
cmd_parser.add_argument("-v", action="store_true", help="Enable verbose output")
cmd_parser.add_argument("files", nargs="*", help="Run on specific globs")
args = cmd_parser.parse_args()
# Setting only one of -c or -p disables the other. If both or neither are set, then do both.
format_c = args.c or not args.p
format_py = args.p or not args.c
# Expand the globs passed on the command line, or use the default globs above.
files = []
if args.files:
files = list_files(args.files)
else:
files = list_files(PATHS, EXCLUSIONS, TOP)
# Extract files matching a specific language.
def lang_files(exts):
for file in files:
if os.path.splitext(file)[1].lower() in exts:
yield file
# Run tool on N files at a time (to avoid making the command line too long).
def batch(cmd, files, N=200):
while True:
file_args = list(itertools.islice(files, N))
if not file_args:
break
subprocess.check_call(cmd + file_args)
# Format C files with uncrustify.
if format_c:
command = ["uncrustify", "-c", UNCRUSTIFY_CFG, "-lC", "--no-backup"]
if not args.v:
command.append("-q")
batch(command, lang_files(C_EXTS))
for file in lang_files(C_EXTS):
fixup_c(file)
# Format Python files with black.
if format_py:
command = ["black", "--fast", "--line-length=99"]
if args.v:
command.append("-v")
else:
command.append("-q")
batch(command, lang_files(PY_EXTS))
if __name__ == "__main__":
main()
|
the-stack_106_19580
|
def notaPostulanteEstMultiple():
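    """Compute an applicant's weighted admission score for their career area.

    Worked example (illustrative): for Engineering ("I") with EP=15, RM=12,
    RV=10 and AB=14, the final score is
    15*0.40 + 12*0.30 + 10*0.15 + 14*0.15 = 13.2.
    """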
    # Define variables
    notaFinal=0
    # Input data
    areaCarrera=input("Enter the area your degree programme belongs to:\nB=Biomedical\nI=Engineering\nS=Social Sciences")
    notaEP=float(input("Enter the EP score:"))
    notaRM=float(input("Enter the RM score:"))
    notaRV=float(input("Enter the RV score:"))
    notaAB=float(input("Enter the AB score:"))
    # Processing
    if areaCarrera=="B":
        notaFinal=(notaEP*0.40)+(notaRM*0.10)+(notaRV*0.20)+(notaAB*0.30)
        areaCarrera="Biomedical"
    elif areaCarrera=="I":
        notaFinal=(notaEP*0.40)+(notaRM*0.30)+(notaRV*0.15)+(notaAB*0.15)
        areaCarrera="Engineering"
    elif areaCarrera=="S":
        notaFinal=(notaEP*0.40)+(notaRM*0.10)+(notaRV*0.30)+(notaAB*0.20)
        areaCarrera="Social Sciences"
    else:
        print("The option you entered is not valid... please try again!")
        return
    print("The applicant obtained a score of:",notaFinal,"\nand their degree programme belongs to the area of:",areaCarrera)
notaPostulanteEstMultiple()
|
the-stack_106_19582
|
import abc
import sys
import time
from collections import OrderedDict
from functools import reduce
import numba
import numpy as np
from det3d.core.bbox import box_np_ops
from det3d.core.bbox.geometry import (
is_line_segment_intersection_jit,
points_in_convex_polygon_3d_jit,
points_in_convex_polygon_jit,
)
import copy
class BatchSampler:
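    """Draws fixed-size batches of examples from a list, reshuffling the index
    order whenever the list is exhausted."""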
def __init__(
self, sampled_list, name=None, epoch=None, shuffle=True, drop_reminder=False
):
self._sampled_list = sampled_list
self._indices = np.arange(len(sampled_list))
if shuffle:
np.random.shuffle(self._indices)
self._idx = 0
self._example_num = len(sampled_list)
self._name = name
self._shuffle = shuffle
self._epoch = epoch
self._epoch_counter = 0
self._drop_reminder = drop_reminder
def _sample(self, num):
if self._idx + num >= self._example_num:
ret = self._indices[self._idx :].copy()
self._reset()
else:
ret = self._indices[self._idx : self._idx + num]
self._idx += num
return ret
def _reset(self):
# if self._name is not None:
# print("reset", self._name)
if self._shuffle:
np.random.shuffle(self._indices)
self._idx = 0
def sample(self, num):
indices = self._sample(num)
return [self._sampled_list[i] for i in indices]
# return np.random.choice(self._sampled_list, num)
class DataBasePreprocessing:
def __call__(self, db_infos):
return self._preprocess(db_infos)
@abc.abstractclassmethod
def _preprocess(self, db_infos):
pass
class DBFilterByDifficulty(DataBasePreprocessing):
def __init__(self, removed_difficulties, logger=None):
self._removed_difficulties = removed_difficulties
logger.info(f"{removed_difficulties}")
def _preprocess(self, db_infos):
new_db_infos = {}
for key, dinfos in db_infos.items():
new_db_infos[key] = [
info
for info in dinfos
if info["difficulty"] not in self._removed_difficulties
]
return new_db_infos
class DBFilterByMinNumPoint(DataBasePreprocessing):
def __init__(self, min_gt_point_dict, logger=None):
self._min_gt_point_dict = min_gt_point_dict
logger.info(f"{min_gt_point_dict}")
def _preprocess(self, db_infos):
for name, min_num in self._min_gt_point_dict.items():
if min_num > 0:
filtered_infos = []
for info in db_infos[name]:
if info["num_points_in_gt"] >= min_num:
filtered_infos.append(info)
db_infos[name] = filtered_infos
return db_infos
class DataBasePreprocessor:
def __init__(self, preprocessors):
self._preprocessors = preprocessors
def __call__(self, db_infos):
for prepor in self._preprocessors:
db_infos = prepor(db_infos)
return db_infos
def random_crop_frustum(
bboxes, rect, Trv2c, P2, max_crop_height=1.0, max_crop_width=0.9
):
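    """Sample a random sub-rectangle inside each 2D image box and lift it to a
    3D viewing frustum in lidar coordinates (via the camera matrices P2, rect
    and Trv2c)."""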
num_gt = bboxes.shape[0]
crop_minxy = np.random.uniform(
[1 - max_crop_width, 1 - max_crop_height], [0.3, 0.3], size=[num_gt, 2]
)
crop_maxxy = np.ones([num_gt, 2], dtype=bboxes.dtype)
crop_bboxes = np.concatenate([crop_minxy, crop_maxxy], axis=1)
left = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if left:
crop_bboxes[:, [0, 2]] -= crop_bboxes[:, 0:1]
# crop_relative_bboxes to real bboxes
crop_bboxes *= np.tile(bboxes[:, 2:] - bboxes[:, :2], [1, 2])
crop_bboxes += np.tile(bboxes[:, :2], [1, 2])
C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
frustums = box_np_ops.get_frustum_v2(crop_bboxes, C)
frustums -= T
# frustums = np.linalg.inv(R) @ frustums.T
frustums = np.einsum("ij, akj->aki", np.linalg.inv(R), frustums)
frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
return frustums
def filter_gt_box_outside_range(gt_boxes, limit_range):
"""remove gtbox outside training range.
this function should be applied after other prep functions
Args:
gt_boxes ([type]): [description]
limit_range ([type]): [description]
"""
gt_boxes_bv = box_np_ops.center_to_corner_box2d(
gt_boxes[:, [0, 1]], gt_boxes[:, [3, 3 + 1]], gt_boxes[:, -1]
)
bounding_box = box_np_ops.minmax_to_corner_2d(
np.asarray(limit_range)[np.newaxis, ...]
)
ret = points_in_convex_polygon_jit(gt_boxes_bv.reshape(-1, 2), bounding_box)
return np.any(ret.reshape(-1, 4), axis=1)
def filter_gt_box_outside_range_by_center(gt_boxes, limit_range):
"""remove gtbox outside training range.
this function should be applied after other prep functions
Args:
gt_boxes ([type]): [description]
limit_range ([type]): [description]
"""
gt_box_centers = gt_boxes[:, :2]
bounding_box = box_np_ops.minmax_to_corner_2d(
np.asarray(limit_range)[np.newaxis, ...]
)
ret = points_in_convex_polygon_jit(gt_box_centers, bounding_box)
return ret.reshape(-1)
def filter_gt_low_points(gt_boxes, points, num_gt_points, point_num_threshold=2):
    points_mask = np.ones([points.shape[0]], np.bool_)
    gt_boxes_mask = np.ones([gt_boxes.shape[0]], np.bool_)
for i, num in enumerate(num_gt_points):
if num <= point_num_threshold:
masks = box_np_ops.points_in_rbbox(points, gt_boxes[i : i + 1])
masks = masks.reshape([-1])
points_mask &= np.logical_not(masks)
gt_boxes_mask[i] = False
return gt_boxes[gt_boxes_mask], points[points_mask]
def mask_points_in_corners(points, box_corners):
surfaces = box_np_ops.corner_to_surfaces_3d(box_corners)
mask = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
return mask
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[:] = np.eye(3)
if axis == 1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 2] = -rot_sin
rot_mat_T[2, 0] = rot_sin
rot_mat_T[2, 2] = rot_cos
elif axis == 2 or axis == -1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
elif axis == 0:
rot_mat_T[1, 1] = rot_cos
rot_mat_T[1, 2] = -rot_sin
rot_mat_T[2, 1] = rot_sin
rot_mat_T[2, 2] = rot_cos
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
corners[:] = corners @ rot_mat_T
@numba.jit(nopython=True)
def _box_single_to_corner_jit(boxes):
num_box = boxes.shape[0]
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(1, 4, 2)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)
for i in range(num_box):
rot_sin = np.sin(boxes[i, -1])
rot_cos = np.cos(boxes[i, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]
return box_corners
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
# print(valid_mask)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_corners[:] = box_corners[i]
current_corners -= boxes[i, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += boxes[i, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners
)
coll_mat[0, i] = False
# print(coll_mat)
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
break
return success_mask
@numba.njit
def noise_per_box_group(boxes, valid_mask, loc_noises, rot_noises, group_nums):
    # WARNING: this function needs boxes to be sorted by group id.
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_groups = group_nums.shape[0]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
max_group_num = group_nums.max()
current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
# print(valid_mask)
idx = 0
for num in group_nums:
if valid_mask[idx]:
for j in range(num_tests):
for i in range(num):
current_corners[i] = box_corners[i + idx]
current_corners[i] -= boxes[i + idx, :2]
_rotation_box2d_jit_(
current_corners[i], rot_noises[idx + i, j], rot_mat_T
)
current_corners[i] += (
boxes[i + idx, :2] + loc_noises[i + idx, j, :2]
)
coll_mat = box_collision_test(
current_corners[:num].reshape(num, 4, 2), box_corners
)
for i in range(num): # remove self-coll
coll_mat[i, idx : idx + num] = False
if not coll_mat.any():
for i in range(num):
success_mask[i + idx] = j
box_corners[i + idx] = current_corners[i]
break
idx += num
return success_mask
@numba.njit
def noise_per_box_group_v2_(
boxes, valid_mask, loc_noises, rot_noises, group_nums, global_rot_noises
):
# WARNING: this function need boxes to be sorted by group id.
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
max_group_num = group_nums.max()
current_box = np.zeros((1, 5), dtype=boxes.dtype)
current_corners = np.zeros((max_group_num, 4, 2), dtype=boxes.dtype)
dst_pos = np.zeros((max_group_num, 2), dtype=boxes.dtype)
current_grot = np.zeros((max_group_num,), dtype=boxes.dtype)
dst_grot = np.zeros((max_group_num,), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners_norm = corners_norm.reshape(4, 2)
# print(valid_mask)
idx = 0
for num in group_nums:
if valid_mask[idx]:
for j in range(num_tests):
for i in range(num):
current_box[0, :] = boxes[i + idx]
current_radius = np.sqrt(
current_box[0, 0] ** 2 + current_box[0, 1] ** 2
)
current_grot[i] = np.arctan2(current_box[0, 0], current_box[0, 1])
dst_grot[i] = current_grot[i] + global_rot_noises[idx + i, j]
dst_pos[i, 0] = current_radius * np.sin(dst_grot[i])
dst_pos[i, 1] = current_radius * np.cos(dst_grot[i])
current_box[0, :2] = dst_pos[i]
current_box[0, -1] += dst_grot[i] - current_grot[i]
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[i] = (
current_box[0, 2:4] * corners_norm @ rot_mat_T
+ current_box[0, :2]
)
current_corners[i] -= current_box[0, :2]
_rotation_box2d_jit_(
current_corners[i], rot_noises[idx + i, j], rot_mat_T
)
current_corners[i] += (
current_box[0, :2] + loc_noises[i + idx, j, :2]
)
coll_mat = box_collision_test(
current_corners[:num].reshape(num, 4, 2), box_corners
)
for i in range(num): # remove self-coll
coll_mat[i, idx : idx + num] = False
if not coll_mat.any():
for i in range(num):
success_mask[i + idx] = j
box_corners[i + idx] = current_corners[i]
loc_noises[i + idx, j, :2] += dst_pos[i] - boxes[i + idx, :2]
rot_noises[i + idx, j] += dst_grot[i] - current_grot[i]
break
idx += num
return success_mask
@numba.njit
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, global_rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
current_box = np.zeros((1, 5), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
dst_pos = np.zeros((2,), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners_norm = corners_norm.reshape(4, 2)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_box[0, :] = boxes[i]
current_radius = np.sqrt(boxes[i, 0] ** 2 + boxes[i, 1] ** 2)
current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
dst_grot = current_grot + global_rot_noises[i, j]
dst_pos[0] = current_radius * np.sin(dst_grot)
dst_pos[1] = current_radius * np.cos(dst_grot)
current_box[0, :2] = dst_pos
current_box[0, -1] += dst_grot - current_grot
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[:] = (
current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
)
current_corners -= current_box[0, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += current_box[0, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners
)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
loc_noises[i, j, :2] += dst_pos - boxes[i, :2]
rot_noises[i, j] += dst_grot - current_grot
break
return success_mask
@numba.njit
def points_transform_(
points, centers, point_masks, loc_transform, rot_transform, valid_mask
):
num_box = centers.shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
_rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= centers[j, :3]
points[i : i + 1, :3] = points[i : i + 1, :3] @ rot_mat_T[j]
points[i, :3] += centers[j, :3]
points[i, :3] += loc_transform[j]
break # only apply first box's transform
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
num_box = boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
boxes[i, :3] += loc_transform[i]
boxes[i, 6] += rot_transform[i]
def _select_transform(transform, indices):
result = np.zeros((transform.shape[0], *transform.shape[2:]), dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
@numba.njit
def group_transform_(loc_noise, rot_noise, locs, rots, group_center, valid_mask):
# loc_noise: [N, M, 3], locs: [N, 3]
# rot_noise: [N, M]
# group_center: [N, 3]
num_try = loc_noise.shape[1]
r = 0.0
x = 0.0
y = 0.0
rot_center = 0.0
for i in range(loc_noise.shape[0]):
if valid_mask[i]:
x = locs[i, 0] - group_center[i, 0]
y = locs[i, 1] - group_center[i, 1]
r = np.sqrt(x ** 2 + y ** 2)
# calculate rots related to group center
rot_center = np.arctan2(x, y)
for j in range(num_try):
loc_noise[i, j, 0] += r * (
np.sin(rot_center + rot_noise[i, j]) - np.sin(rot_center)
)
loc_noise[i, j, 1] += r * (
np.cos(rot_center + rot_noise[i, j]) - np.cos(rot_center)
)
@numba.njit
def group_transform_v2_(
loc_noise, rot_noise, locs, rots, group_center, grot_noise, valid_mask
):
# loc_noise: [N, M, 3], locs: [N, 3]
# rot_noise: [N, M]
# group_center: [N, 3]
num_try = loc_noise.shape[1]
r = 0.0
x = 0.0
y = 0.0
rot_center = 0.0
for i in range(loc_noise.shape[0]):
if valid_mask[i]:
x = locs[i, 0] - group_center[i, 0]
y = locs[i, 1] - group_center[i, 1]
r = np.sqrt(x ** 2 + y ** 2)
# calculate rots related to group center
rot_center = np.arctan2(x, y)
for j in range(num_try):
loc_noise[i, j, 0] += r * (
np.sin(rot_center + rot_noise[i, j] + grot_noise[i, j])
- np.sin(rot_center + grot_noise[i, j])
)
loc_noise[i, j, 1] += r * (
np.cos(rot_center + rot_noise[i, j] + grot_noise[i, j])
- np.cos(rot_center + grot_noise[i, j])
)
def set_group_noise_same_(loc_noise, rot_noise, group_ids):
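    # Copy the first member's sampled noise to every box in the same group so
    # that grouped boxes are perturbed rigidly together.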
gid_to_index_dict = {}
for i, gid in enumerate(group_ids):
if gid not in gid_to_index_dict:
gid_to_index_dict[gid] = i
for i in range(loc_noise.shape[0]):
loc_noise[i] = loc_noise[gid_to_index_dict[group_ids[i]]]
rot_noise[i] = rot_noise[gid_to_index_dict[group_ids[i]]]
def set_group_noise_same_v2_(loc_noise, rot_noise, grot_noise, group_ids):
gid_to_index_dict = {}
for i, gid in enumerate(group_ids):
if gid not in gid_to_index_dict:
gid_to_index_dict[gid] = i
for i in range(loc_noise.shape[0]):
loc_noise[i] = loc_noise[gid_to_index_dict[group_ids[i]]]
rot_noise[i] = rot_noise[gid_to_index_dict[group_ids[i]]]
grot_noise[i] = grot_noise[gid_to_index_dict[group_ids[i]]]
def get_group_center(locs, group_ids):
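    # Compute the mean center of each group id and broadcast it back so every
    # box gets its own group's center; also return the per-group member counts.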
num_groups = 0
group_centers = np.zeros_like(locs)
group_centers_ret = np.zeros_like(locs)
group_id_dict = {}
group_id_num_dict = OrderedDict()
for i, gid in enumerate(group_ids):
if gid >= 0:
if gid in group_id_dict:
group_centers[group_id_dict[gid]] += locs[i]
group_id_num_dict[gid] += 1
else:
group_id_dict[gid] = num_groups
num_groups += 1
group_id_num_dict[gid] = 1
group_centers[group_id_dict[gid]] = locs[i]
for i, gid in enumerate(group_ids):
group_centers_ret[i] = (
group_centers[group_id_dict[gid]] / group_id_num_dict[gid]
)
return group_centers_ret, group_id_num_dict
def noise_per_object_v3_(
gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=5,
group_ids=None,
):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
enable_grot = (
np.abs(global_random_rot_range[0] - global_random_rot_range[1]) >= 1e-3
)
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
if valid_mask is None:
valid_mask = np.ones((num_boxes,), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]
)
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try],
)
if group_ids is not None:
if enable_grot:
set_group_noise_same_v2_(
loc_noises, rot_noises, global_rot_noises, group_ids
)
else:
set_group_noise_same_(loc_noises, rot_noises, group_ids)
group_centers, group_id_num_dict = get_group_center(gt_boxes[:, :3], group_ids)
if enable_grot:
group_transform_v2_(
loc_noises,
rot_noises,
gt_boxes[:, :3],
gt_boxes[:, 6],
group_centers,
global_rot_noises,
valid_mask,
)
else:
group_transform_(
loc_noises,
rot_noises,
gt_boxes[:, :3],
gt_boxes[:, 6],
group_centers,
valid_mask,
)
group_nums = np.array(list(group_id_num_dict.values()), dtype=np.int64)
origin = [0.5, 0.5, 0.5]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2
)
if group_ids is not None:
if not enable_grot:
selected_noise = noise_per_box_group(
gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask,
loc_noises,
rot_noises,
group_nums,
)
else:
selected_noise = noise_per_box_group_v2_(
gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask,
loc_noises,
rot_noises,
group_nums,
global_rot_noises,
)
else:
if not enable_grot:
selected_noise = noise_per_box(
gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises
)
else:
selected_noise = noise_per_box_v2_(
gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask,
loc_noises,
rot_noises,
global_rot_noises,
)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
if points is not None:
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(
points,
gt_boxes[:, :3],
point_masks,
loc_transforms,
rot_transforms,
valid_mask,
)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def noise_per_object_v2_(
gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=100,
):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
if valid_mask is None:
valid_mask = np.ones((num_boxes,), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
# loc_noises = np.random.uniform(
# -center_noise_std, center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]
)
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try],
)
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2
)
if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 1e-3:
selected_noise = noise_per_box(
gt_boxes[:, [0, 1, 3, 4, 6]], valid_mask, loc_noises, rot_noises
)
else:
selected_noise = noise_per_box_v2_(
gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask,
loc_noises,
rot_noises,
global_rot_noises,
)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
if points is not None:
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
points_transform_(
points,
gt_boxes[:, :3],
point_masks,
loc_transforms,
rot_transforms,
valid_mask,
)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
def global_scaling(gt_boxes, points, scale=0.05):
if not isinstance(scale, list):
scale = [-scale, scale]
noise_scale = np.random.uniform(scale[0] + 1, scale[1] + 1)
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
def global_rotation(gt_boxes, points, rotation=np.pi / 4):
if not isinstance(rotation, list):
rotation = [-rotation, rotation]
noise_rotation = np.random.uniform(rotation[0], rotation[1])
points[:, :3] = box_np_ops.rotation_points_single_angle(
points[:, :3], noise_rotation, axis=2
)
gt_boxes[:, :3] = box_np_ops.rotation_points_single_angle(
gt_boxes[:, :3], noise_rotation, axis=2
)
if gt_boxes.shape[1] > 7:
gt_boxes[:, 6:8] = box_np_ops.rotation_points_single_angle(
np.hstack([gt_boxes[:, 6:8], np.zeros((gt_boxes.shape[0], 1))]),
noise_rotation,
axis=2,
)[:, :2]
gt_boxes[:, -1] += noise_rotation
return gt_boxes, points
def random_flip(gt_boxes, points, probability=0.5):
enable = np.random.choice(
[False, True], replace=False, p=[1 - probability, probability]
)
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, -1] = -gt_boxes[:, -1] + np.pi
points[:, 1] = -points[:, 1]
if gt_boxes.shape[1] > 7: # y axis: x, y, z, w, h, l, vx, vy, r
gt_boxes[:, 7] = -gt_boxes[:, 7]
return gt_boxes, points
def global_scaling_v2(gt_boxes, points, min_scale=0.95, max_scale=1.05):
noise_scale = np.random.uniform(min_scale, max_scale)
points[:, :3] *= noise_scale
gt_boxes[:, :-1] *= noise_scale
return gt_boxes, points
def global_rotation_v2(gt_boxes, points, min_rad=-np.pi / 4, max_rad=np.pi / 4):
noise_rotation = np.random.uniform(min_rad, max_rad)
points[:, :3] = box_np_ops.rotation_points_single_angle(
points[:, :3], noise_rotation, axis=2
)
gt_boxes[:, :3] = box_np_ops.rotation_points_single_angle(
gt_boxes[:, :3], noise_rotation, axis=2
)
gt_boxes[:, -1] += noise_rotation
return gt_boxes, points
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
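    # Pairwise 2D collision test between box corner sets: prune with
    # axis-aligned standup boxes first, then test edge-edge intersections,
    # and finally check whether one box fully contains the other.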
N = boxes.shape[0]
K = qboxes.shape[0]
ret = np.zeros((N, K), dtype=np.bool_)
slices = np.array([1, 2, 3, 0])
lines_boxes = np.stack(
(boxes, boxes[:, slices, :]), axis=2
) # [N, 4, 2(line), 2(xy)]
lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
# vec = np.zeros((2,), dtype=boxes.dtype)
boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes)
qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes)
for i in range(N):
for j in range(K):
# calculate standup first
iw = min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(
boxes_standup[i, 0], qboxes_standup[j, 0]
)
if iw > 0:
ih = min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(
boxes_standup[i, 1], qboxes_standup[j, 1]
)
if ih > 0:
for k in range(4):
for l in range(4):
A = lines_boxes[i, k, 0]
B = lines_boxes[i, k, 1]
C = lines_qboxes[j, l, 0]
D = lines_qboxes[j, l, 1]
acd = (D[1] - A[1]) * (C[0] - A[0]) > (C[1] - A[1]) * (
D[0] - A[0]
)
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (
D[0] - B[0]
)
if acd != bcd:
abc = (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (
C[0] - A[0]
)
abd = (D[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (
D[0] - A[0]
)
if abc != abd:
ret[i, j] = True # collision.
break
if ret[i, j] is True:
break
if ret[i, j] is False:
# now check complete overlap.
# box overlap qbox:
box_overlap_qbox = True
for l in range(4): # point l in qboxes
for k in range(4): # corner k in boxes
vec = boxes[i, k] - boxes[i, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (boxes[i, k, 0] - qboxes[j, l, 0])
cross -= vec[0] * (boxes[i, k, 1] - qboxes[j, l, 1])
if cross >= 0:
box_overlap_qbox = False
break
if box_overlap_qbox is False:
break
if box_overlap_qbox is False:
qbox_overlap_box = True
for l in range(4): # point l in boxes
for k in range(4): # corner k in qboxes
vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (qboxes[j, k, 0] - boxes[i, l, 0])
cross -= vec[0] * (qboxes[j, k, 1] - boxes[i, l, 1])
if cross >= 0: #
qbox_overlap_box = False
break
if qbox_overlap_box is False:
break
if qbox_overlap_box:
ret[i, j] = True # collision.
else:
ret[i, j] = True # collision.
return ret
def global_translate_(gt_boxes, points, noise_translate_std):
"""
Apply global translation to gt_boxes and points.
"""
if not isinstance(noise_translate_std, (list, tuple, np.ndarray)):
noise_translate_std = np.array(
[noise_translate_std, noise_translate_std, noise_translate_std]
)
if all([e == 0 for e in noise_translate_std]):
return gt_boxes, points
noise_translate = np.array(
[
np.random.normal(0, noise_translate_std[0], 1),
np.random.normal(0, noise_translate_std[1], 1),
            np.random.normal(0, noise_translate_std[2], 1),
]
).T
points[:, :3] += noise_translate
gt_boxes[:, :3] += noise_translate
return gt_boxes, points
if __name__ == "__main__":
bboxes = np.array(
[
[0.0, 0.0, 0.5, 0.5],
[0.2, 0.2, 0.6, 0.6],
[0.7, 0.7, 0.9, 0.9],
[0.55, 0.55, 0.8, 0.8],
]
)
bbox_corners = box_np_ops.minmax_to_corner_2d(bboxes)
print(bbox_corners.shape)
print(box_collision_test(bbox_corners, bbox_corners))
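    # Minimal usage sketch of the augmentation helpers above (values are
    # illustrative; assumes gt_boxes rows are [x, y, z, w, l, h, yaw] and
    # points rows start with [x, y, z]).
    gt_boxes = np.array([[0.0, 0.0, 0.0, 1.6, 3.9, 1.5, 0.0]])
    points = np.random.uniform(-5.0, 5.0, size=(100, 4))
    gt_boxes, points = random_flip(gt_boxes, points)
    gt_boxes, points = global_rotation_v2(gt_boxes, points)
    gt_boxes, points = global_scaling_v2(gt_boxes, points)
    print(gt_boxes)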
|
the-stack_106_19583
|
# -*- coding: utf-8 -*-
# Copyright 2015 Cyan, Inc.
# Copyright 2017, 2018 Ciena Corporation.
import collections
import struct
from six import string_types, text_type
from .common import BufferUnderflowError
_NULL_SHORT_STRING = struct.pack('>h', -1)
def _buffer_underflow(what, buf, offset, size):
return BufferUnderflowError((
"Not enough data to read {what} at offset {offset:,d}: {size:,d} bytes required,"
" but {available:,d} available."
).format(
what=what,
offset=offset,
size=size,
available=len(buf) - offset,
))
def _coerce_topic(topic):
"""
Ensure that the topic name is text string of a valid length.
:param topic: Kafka topic name. Valid characters are in the set ``[a-zA-Z0-9._-]``.
    :raises ValueError: when the topic name is empty or longer than 249 characters
:raises TypeError: when the topic is not :class:`unicode` or :class:`str`
"""
if not isinstance(topic, string_types):
raise TypeError('topic={!r} must be text'.format(topic))
if not isinstance(topic, text_type):
topic = topic.decode('ascii')
if len(topic) < 1:
raise ValueError('invalid empty topic name')
if len(topic) > 249:
raise ValueError('topic={!r} name is too long: {} > 249'.format(
topic, len(topic)))
return topic
def _coerce_consumer_group(consumer_group):
"""
Ensure that the consumer group is a text string.
:param consumer_group: :class:`bytes` or :class:`str` instance
:raises TypeError: when `consumer_group` is not :class:`bytes`
or :class:`str`
"""
if not isinstance(consumer_group, string_types):
raise TypeError('consumer_group={!r} must be text'.format(consumer_group))
if not isinstance(consumer_group, text_type):
consumer_group = consumer_group.decode('utf-8')
return consumer_group
def _coerce_client_id(client_id):
"""
Ensure the provided client ID is a byte string. If a text string is
provided, it is encoded as UTF-8 bytes.
:param client_id: :class:`bytes` or :class:`str` instance
"""
if isinstance(client_id, type(u'')):
client_id = client_id.encode('utf-8')
if not isinstance(client_id, bytes):
        raise TypeError('{!r} is not a valid client id (must be'
                        ' str or bytes)'.format(client_id))
return client_id
def write_int_string(s):
if s is None:
return struct.pack('>i', -1)
return struct.pack('>i', len(s)) + s
def write_short_ascii(s):
"""
Encode a Kafka short string which represents ASCII text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be ASCII-encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
"""
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))
return write_short_bytes(s.encode('ascii'))
def write_short_text(s):
"""
Encode a Kafka short string which represents Unicode text.
:param str s:
Text string (`str` on Python 3, `str` or `unicode` on Python 2) or
``None``. The string will be UTF-8 encoded.
:returns: length-prefixed `bytes`
:raises:
`struct.error` when the UTF-8 encoded form of the string exceeds
32767 bytes.
"""
if s is None:
return _NULL_SHORT_STRING
if not isinstance(s, string_types):
raise TypeError('{!r} is not text'.format(s))
return write_short_bytes(s.encode('utf-8'))
def write_short_bytes(b):
"""
Encode a Kafka short string which contains arbitrary bytes. A short string
is limited to 32767 bytes in length by the signed 16-bit length prefix.
A length prefix of -1 indicates ``null``, represented as ``None`` in
Python.
:param bytes b:
No more than 32767 bytes, or ``None`` for the null encoding.
:return: length-prefixed `bytes`
:raises:
`struct.error` for strings longer than 32767 characters
"""
if b is None:
return _NULL_SHORT_STRING
if not isinstance(b, bytes):
raise TypeError('{!r} is not bytes'.format(b))
elif len(b) > 32767:
raise struct.error(len(b))
else:
return struct.pack('>h', len(b)) + b
def read_short_bytes(data, cur):
if len(data) < cur + 2:
raise _buffer_underflow('short string length', data, cur, 2)
(strlen,) = struct.unpack('>h', data[cur:cur + 2])
if strlen == -1:
return None, cur + 2
cur += 2
if len(data) < cur + strlen:
raise _buffer_underflow('short string', data, cur, strlen)
out = data[cur:cur + strlen]
return out, cur + strlen
def read_short_ascii(data, cur):
b, cur = read_short_bytes(data, cur)
return b.decode('ascii'), cur
def read_short_text(data, cur):
b, cur = read_short_bytes(data, cur)
return b.decode('utf-8'), cur
def read_int_string(data, cur):
if len(data) < cur + 4:
raise _buffer_underflow('long string length', data, cur, 4)
(strlen,) = struct.unpack('>i', data[cur:cur + 4])
if strlen == -1:
return None, cur + 4
cur += 4
if len(data) < cur + strlen:
raise _buffer_underflow('long string', data, cur, strlen)
out = data[cur:cur + strlen]
return out, cur + strlen
def relative_unpack(fmt, data, cur):
size = struct.calcsize(fmt)
if len(data) < cur + size:
raise _buffer_underflow(fmt, data, cur, size)
out = struct.unpack(fmt, data[cur:cur + size])
return out, cur + size
def group_by_topic_and_partition(tuples):
out = collections.defaultdict(dict)
for t in tuples:
out[t.topic][t.partition] = t
return out
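if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative only): the short-string writer
    # above pairs with the matching reader, so encoding then decoding gives
    # back the original value and the final offset.
    encoded = write_short_ascii(u'example-topic')
    decoded, offset = read_short_ascii(encoded, 0)
    assert decoded == u'example-topic' and offset == len(encoded)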
|
the-stack_106_19584
|
import argparse
import numpy as np
import torch
import model
def run_grad_weight(kcnn, sentence):
out = kcnn.rank(sentence)
h = kcnn.compute_grad_norm(torch.sum(out[1]))
for w, score in zip(sentence.split(), h):
print(w, score)
def run_grad_pca(kcnn, sentence):
out = kcnn.rank(sentence)
print("Prediction: {}".format(out[0].cpu().data[0]))
toks = sentence.split()
mat = kcnn.compute_corr_matrix(torch.sum(out[1]))
max_len = max([len(tok) for tok in toks])
fmt_str = " " * (max_len + 1) + " ".join(["{:>%s}" % max(len(word), 6) for word in toks])
print(fmt_str.format(*toks))
for i, (word, row) in enumerate(zip(toks, mat)):
print(("{:>%s}" % max_len).format(word), end=" ")
for j, (w2, val) in enumerate(zip(toks, row)):
if i == j and abs(val) > 0.1:
print("\x1b[1;33m", end="")
print(("{:>%s}" % max(len(w2), 6)).format(round(val, 3)), end=" ")
print("\x1b[1;0m", end="")
print()
s, v = np.linalg.eig(mat)
fmt_str = " ".join(["{:>%s}" % max(len(word), 6) for word in toks])
v = v.transpose()
print(fmt_str.format(*toks) + " [lambda]")
for row, s_val in zip(v, s):
for word, val in zip(toks, row):
if abs(val) > 0.25:
print("\x1b[1;33m", end="")
print(("{:>%s}" % max(len(word), 6)).format(round(val, 3)), end=" ")
print("\x1b[1;0m", end="")
print(round(s_val, 3))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("sentence", type=str)
parser.add_argument("--input_file", type=str, default="model.pt")
parser.add_argument("--mode", type=str, choices=["grad_pca", "grad_weight"], default="grad_pca")
args = parser.parse_args()
kcnn = torch.load(args.input_file)
kcnn.eval()
if args.mode == "grad_pca":
run_grad_pca(kcnn, args.sentence)
else:
run_grad_weight(kcnn, args.sentence)
if __name__ == "__main__":
    main()
|
the-stack_106_19585
|
from mission.constants.missions import Gate, Path
from mission.constants import teagle
from conf.vehicle import is_mainsub
# HYDROPHONES_PINGER_DEPTH = 4.7
NONSURFACE_MIN_DEPTH = 0.5 # if is_mainsub else 1.5
# Note: These values are copied straight from the Teagle configuration.
# They need to be updated for Transdec!!
# gate = Gate(
# depth=2.0,
# gate_width_threshold=0.4,
# charge_dist=16 if is_mainsub() else 12
# )
path = Path(
depth=1.0,
search_forward=6 if is_mainsub else 2,
search_stride = 10 if is_mainsub else 8,
search_right_first=True,
search_speed=0.1,
post_dist=2.5,
failure_back_up_dist=0.5 if is_mainsub else 0.1,
failure_back_up_speed=0.2 if is_mainsub else 0.1,
)
#
#dice = Dice(
# depth=3.3,
# max_depth=4,
# search_forward=4,
# search_stride=8,
# search_speed=0.2,
# min_dot_radius=0.04,
# ram_dist=1.5,
# ram_speed=0.16,
# rammed_back_up_timeout=20,
# lost_sight_back_up_timeout=5,
# search_default_zero_timeout=60,
#)
#
#highway = Highway(
# high_depth=1.6,
# low_depth=1.6,
# dist=6 if is_mainsub() else 2,
# speed=0.4 if is_mainsub() else 0.2,
#)
#
#track = Track(
# depth=2.2 if is_mainsub() else 2.2,
# slow_down_dist=5,
# max_speed=0.3 if is_mainsub() else 0.2,
# min_speed=0.1,
# vision_frame_period=0.5,
#)
#
#roulette = Roulette(
# depth_search=1.5,
# depth_realign=3.2,
# depth_drop=3.9,
# heading_offset=-30,
#)
#
#cash_in = CashIn(
# approach_funnel_depth=0.5,
# drop_approach_dist=0.2,
# # (right, left)
# drop_dvl_forward_correct_dist=(0.1, -0.13),
# drop_heading_correct=(0, -7),
# pick_up_both_depth=2.0,
# pick_up_search_depth_1=2.5,
# pick_up_search_depth_2=3.0,
# pick_up_search_depth_3=3.5,
# pick_up_start_follow_depth=4.5,
# attempt_surface_depth=-1,
# attempt_funnel_depth=0,
#)
|
the-stack_106_19587
|
"""
"""
import os
import h5py
import numpy as np
from ..subhalo_mass_function import log10_cumulative_shmf
from ..subhalo_mass_function import DEFAULT_SHMF_PARAMS, DEFAULT_SHMF_PARAM_BOUNDS
_THIS_DRNAME = os.path.dirname(os.path.abspath(__file__))
def test_default_shmf_agrees_with_bpl():
fname = os.path.join(_THIS_DRNAME, "testing_data", "bpl_shmf_z0.h5")
with h5py.File(fname, "r") as hdf:
logmp_table = hdf["logmp_table"][...]
lognd_table = hdf["lognd_table"][...]
lognd_pred = log10_cumulative_shmf(logmp_table)
mse = np.mean((lognd_pred - lognd_table) ** 2)
assert mse < 0.005
def test_default_shmf_params_are_in_bounds():
for param_name, bounds in DEFAULT_SHMF_PARAM_BOUNDS.items():
default_value = DEFAULT_SHMF_PARAMS[param_name]
assert bounds[0] < default_value < bounds[1]
|
the-stack_106_19588
|
import torch
from .base_model import BaseModel
from . import networks
from typing import Union
class IntrinsicUnetModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
parser.add_argument('--netG_dec', type=int, default='1', help='The number of generator output')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_S', type=float, default=100.0, help='weight for Shading loss')
parser.add_argument('--lambda_R', type=float, default=100.0, help='weight for Reflection loss')
parser.add_argument('--loss_mask', action='store_true', help='Masked image when calculating loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
if opt.netG_dec==1:
self.loss_names = ['G_R']
self.visual_names = ['real_I', 'fake_R', 'real_R', 'fake_S', 'real_S', 'mask']
else:
self.loss_names = ['G_R', 'G_S']
self.visual_names = ['fake_I', 'real_I', 'fake_R', 'real_R', 'fake_S', 'fake_I_R', 'real_S', 'mask']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
output = opt.output_nc*opt.netG_dec
print('generator output:', output)
self.netG = networks.define_G(opt.input_nc, output, opt.ngf, opt.netG, opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
self.real_I = torch.squeeze(input['A'],0).to(self.device) # [bn, 3, 256, 256]
self.real_R = torch.squeeze(input['B'],0).to(self.device) # [bn, 3, 256, 256]
self.real_S = torch.squeeze(input['C'],0).to(self.device) # [bn, 3, 256, 256]
self.mask = torch.squeeze(input['D'],0).to(self.device) # [bn, 3, 256, 256]
self.image_paths = input['A_paths']
def percentile(self, t: torch.tensor, q: float) -> Union[int, float]:
k = 1 + round(.01 * float(q) * (t.numel() - 1))
result = t.view(-1).kthvalue(k).values.item()
return result
def calc_shading(self, img, albedo, mask):
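        # Recover shading from the image and predicted albedo via the intrinsic
        # decomposition relation S = I**2.2 / R, optionally normalize by the
        # masked 90th percentile, and rescale the result to [-1, 1].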
img = torch.clamp(img * 0.5 + 0.5, min=0.0, max=1.0) # 0~1
albedo = torch.clamp(albedo * 0.5 + 0.5, min=1e-6, max=1.0) # 0~1
shading = img**2.2/albedo
if self.opt.shading_norm:
if torch.sum(mask) < 10:
max_S = 1.0
else:
max_S = self.percentile(shading[self.mask.expand(shading.size()) > 0.5], 90)
shading = shading/max_S
shading = (shading - 0.5) / 0.5
return torch.clamp(shading, min=-1.0, max=1.0) # -1~1
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
if self.opt.netG_dec==1:
self.fake_R = self.netG(self.real_I) # G(A)
self.fake_S = self.calc_shading(self.real_I, self.fake_R, self.mask)
else:
fake_RS = self.netG(self.real_I) # G(A)
self.fake_R = fake_RS[:,:self.opt.output_nc,:,:]
self.fake_S = fake_RS[:,self.opt.output_nc:,:,:]
self.fake_I_R = self.calc_shading(self.real_I, self.fake_R, self.mask)
self.fake_I = self.fake_R + self.fake_S
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
if self.opt.netG_dec==1:
if self.opt.loss_mask:
mask = self.mask*0.5 + 0.5
self.loss_G_R = self.criterionL1(self.fake_R*mask, self.real_R*mask) * self.opt.lambda_R
else:
self.loss_G_R = self.criterionL1(self.fake_R, self.real_R) * self.opt.lambda_R
self.loss_G = self.loss_G_R
else:
if self.opt.loss_mask:
mask = self.mask*0.5 + 0.5
self.loss_G_R = self.criterionL1(self.fake_R*mask, self.real_R*mask) * self.opt.lambda_R
self.loss_G_S = self.criterionL1(self.fake_S*mask, self.real_S*mask) * self.opt.lambda_S
else:
self.loss_G_R = self.criterionL1(self.fake_R, self.real_R) * self.opt.lambda_R
self.loss_G_S = self.criterionL1(self.fake_S, self.real_S) * self.opt.lambda_S
self.loss_G = self.loss_G_R + self.loss_G_S
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
|
the-stack_106_19590
|
"""
OXASL_OPTPCASL: Widget to control the optimization process
Copyright (c) 2019 University of Oxford
"""
import os
import wx
import wx.grid
from ..structures import ScanParams, PhysParams, ATTDist, Limits
from ..cost import CBFCost, ATTCost, DOptimalCost
from .widgets import TabPage, NumberChooser
class OptimizerOptions(TabPage):
"""
Tab page for specifying options for the optimization
"""
def __init__(self, parent, idx, n):
TabPage.__init__(self, parent, "Optimization", idx, n, name="optimization")
self.section("Optimization type")
self._opttype = self.choice("Method", choices=["Optimize CBF and ATT", "Optimize CBF only", "Optimize ATT only"])
self.section("ATT prior distribution")
self._att_start = self.number("Starting value (s)", minval=0, maxval=1.0, initial=0.2, digits=4)
        self._att_end = self.number("Ending value (s)", minval=0, maxval=5.0, initial=2.3, digits=4)
self._att_step = self.number("Step (s)", minval=0, maxval=0.01, initial=0.001, digits=4)
self._att_taper = self.number("Taper value (s)", minval=0, maxval=1.0, initial=0.3, digits=4)
self.section("PLD search limits")
self._pld_min = self.number("Min PLD (s)", minval=0, maxval=1.0, initial=0.075, digits=4)
self._pld_max = self.number("Max PLD (s)", minval=1.0, maxval=5.0, initial=2.3, digits=4)
self._pld_step = self.number("Search step (s)", minval=0, maxval=0.1, initial=0.025, digits=4)
self.section("LD search limits")
self._ld_min = self.number("Min LD (s)", minval=0, maxval=1.0, initial=0.1, digits=4)
self._ld_max = self.number("Max LD (s)", minval=1.0, maxval=5.0, initial=1.8, digits=4)
self._ld_step = self.number("Search step (s)", minval=0, maxval=0.1, initial=0.025, digits=4)
self.section("Optimization loop")
self._niters = self.integer("Number of times to repeat optimization", minval=1, maxval=100, initial=10)
self._ngridpoints = self.integer("Initial grid search - number of points", minval=100, maxval=1000000, initial=10000, optional=True, handler=self._gridpts_changed)
self._set_btn = self.button("Optimize", handler=self._optimize)
self.sizer.AddGrowableCol(1, 1)
self.SetSizer(self.sizer)
@property
def att_dist(self):
return ATTDist(self._att_start.GetValue(), self._att_end.GetValue(),
self._att_step.GetValue(), self._att_taper.GetValue())
@property
def pld_lims(self):
return Limits(self._pld_min.GetValue(), self._pld_max.GetValue(), self._pld_step.GetValue(), name="PLD")
@property
def ld_lims(self):
return Limits(self._ld_min.GetValue(), self._ld_max.GetValue(), self._ld_step.GetValue(), name="LD")
@property
def gridpts(self):
if self._ngridpoints.checkbox.GetValue():
return self._ngridpoints.GetValue()
else:
return None
@property
def cost_model(self):
if self._opttype.GetSelection() == 1:
return CBFCost()
if self._opttype.GetSelection() == 2:
return ATTCost()
else:
return DOptimalCost()
def _gridpts_changed(self, _event=None):
if self._ngridpoints.checkbox.GetValue():
self._ngridpoints.Enable()
else:
self._ngridpoints.Disable()
def _optimize(self, _event=None):
self._set_btn.Disable()
self.notebook.win.optimize(self._niters.GetValue())
|
the-stack_106_19591
|
__all__ = ["model"]
from icevision.imports import *
from icevision.backbones import resnet_fpn
from icevision.models.torchvision.utils import *
from torchvision.models.detection.keypoint_rcnn import (
keypointrcnn_resnet50_fpn,
KeypointRCNNPredictor,
KeypointRCNN,
)
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
def model(
num_keypoints: int,
num_classes: int = 2,
backbone: Optional[nn.Module] = None,
remove_internal_transforms: bool = True,
pretrained: bool = True,
**keypoint_rcnn_kwargs
) -> nn.Module:
"""KeypointRCNN model implemented by torchvision.
# Arguments
num_keypoints: Number of keypoints (e.g. 17 in case of COCO).
num_classes: Number of classes (including background).
backbone: Backbone model to use. Defaults to a resnet50_fpn model.
remove_internal_transforms: The torchvision model internally applies transforms
like resizing and normalization, but we already do this at the `Dataset` level,
so it's safe to remove those internal transforms.
pretrained: Argument passed to `keypointrcnn_resnet50_fpn` if `backbone is None`.
By default it is set to True: this is generally used when training a new model (transfer learning).
`pretrained = False` is used during inference (prediction) for cases where the users have their own pretrained weights.
**keypoint_rcnn_kwargs: Keyword arguments that internally are going to be passed to
`torchvision.models.detection.keypoint_rcnn.KeypointRCNN`.
# Returns
A Pytorch `nn.Module`.
"""
if backbone is None:
model = keypointrcnn_resnet50_fpn(
pretrained=pretrained,
pretrained_backbone=pretrained,
**keypoint_rcnn_kwargs
)
in_channels = model.roi_heads.keypoint_predictor.kps_score_lowres.in_channels
model.roi_heads.keypoint_predictor = KeypointRCNNPredictor(
in_channels, num_keypoints
)
if num_classes != 2:
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
resnet_fpn.patch_param_groups(model.backbone)
else:
model = KeypointRCNN(
backbone,
num_classes=num_classes,
num_keypoints=num_keypoints,
**keypoint_rcnn_kwargs
)
patch_rcnn_param_groups(model=model)
if remove_internal_transforms:
remove_internal_model_transforms(model)
return model
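if __name__ == "__main__":
    # Minimal usage sketch (illustrative): build the default resnet50-FPN
    # keypoint model without downloading pretrained weights.
    kprcnn = model(num_keypoints=17, num_classes=2, pretrained=False)
    print(type(kprcnn).__name__)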
|
the-stack_106_19592
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Internal exception with freetype error message
'''
class FT_Exception(Exception):
def __init__(self, errcode, message=''):
self.message = message
self.errcode = errcode
def __str__(self):
return '%s: %s (%s)'%(self.__class__.__name__, self.message,
self._errors.get(self.errcode, 'unknown error'))
_errors = {
0x00: "no error" ,
0x01: "cannot open resource" ,
0x02: "unknown file format" ,
0x03: "broken file" ,
0x04: "invalid FreeType version" ,
0x05: "module version is too low" ,
0x06: "invalid argument" ,
0x07: "unimplemented feature" ,
0x08: "broken table" ,
0x09: "broken offset within table" ,
0x10: "invalid glyph index" ,
0x11: "invalid character code" ,
0x12: "unsupported glyph image format" ,
0x13: "cannot render this glyph format" ,
0x14: "invalid outline" ,
0x15: "invalid composite glyph" ,
0x16: "too many hints" ,
0x17: "invalid pixel size" ,
0x20: "invalid object handle" ,
0x21: "invalid library handle" ,
0x22: "invalid module handle" ,
0x23: "invalid face handle" ,
0x24: "invalid size handle" ,
0x25: "invalid glyph slot handle" ,
0x26: "invalid charmap handle" ,
0x27: "invalid cache manager handle" ,
0x28: "invalid stream handle" ,
0x30: "too many modules" ,
0x31: "too many extensions" ,
0x40: "out of memory" ,
0x41: "unlisted object" ,
0x51: "cannot open stream" ,
0x52: "invalid stream seek" ,
0x53: "invalid stream skip" ,
0x54: "invalid stream read" ,
0x55: "invalid stream operation" ,
0x56: "invalid frame operation" ,
0x57: "nested frame access" ,
0x58: "invalid frame read" ,
0x60: "raster uninitialized" ,
0x61: "raster corrupted" ,
0x62: "raster overflow" ,
0x63: "negative height while rastering" ,
0x70: "too many registered caches" ,
0x80: "invalid opcode" ,
0x81: "too few arguments" ,
0x82: "stack overflow" ,
0x83: "code overflow" ,
0x84: "bad argument" ,
0x85: "division by zero" ,
0x86: "invalid reference" ,
0x87: "found debug opcode" ,
0x88: "found ENDF opcode in execution stream" ,
0x89: "nested DEFS" ,
0x8A: "invalid code range" ,
0x8B: "execution context too long" ,
0x8C: "too many function definitions" ,
0x8D: "too many instruction definitions" ,
0x8E: "SFNT font table missing" ,
        0x8F: "horizontal header (hhea) table missing" ,
        0x90: "locations (loca) table missing" ,
        0x91: "name table missing" ,
        0x92: "character map (cmap) table missing" ,
        0x93: "horizontal metrics (hmtx) table missing" ,
        0x94: "PostScript (post) table missing" ,
        0x95: "invalid horizontal metrics" ,
        0x96: "invalid character map (cmap) format" ,
        0x97: "invalid ppem value" ,
        0x98: "invalid vertical metrics" ,
        0x99: "could not find context" ,
        0x9A: "invalid PostScript (post) table format" ,
        0x9B: "invalid PostScript (post) table" ,
0xA0: "opcode syntax error" ,
0xA1: "argument stack underflow" ,
0xA2: "ignore" ,
0xB0: "`STARTFONT' field missing" ,
0xB1: "`FONT' field missing" ,
0xB2: "`SIZE' field missing" ,
0xB3: "`CHARS' field missing" ,
0xB4: "`STARTCHAR' field missing" ,
0xB5: "`ENCODING' field missing" ,
0xB6: "`BBX' field missing" ,
0xB7: "`BBX' too big" ,
}
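if __name__ == '__main__':
    # Minimal usage sketch (illustrative): raise and render an FT_Exception
    # for a known FreeType error code.
    try:
        raise FT_Exception(0x01, 'failed to open face')
    except FT_Exception as error:
        print(error)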
|
the-stack_106_19593
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
aux = list(filter(lambda x: len(x) >= 2, words))
aux = list(filter(lambda x: x[0] == x[-1], aux))
return len(aux)
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
ord_x = sorted(filter(lambda x: x[0] == 'x', words))
ord_resto = sorted(filter(lambda x: x[0] != 'x', words))
for i in ord_resto:
ord_x.append(i)
return ord_x
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
return sorted(tuples, key=lambda x: x[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the above functions with interesting inputs.
def main():
print('match_ends')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print()
print('front_x')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print()
print('sort_last')
test(sort_last([(1, 3), (3, 2), (2, 1)]),
[(2, 1), (3, 2), (1, 3)])
test(sort_last([(2, 3), (1, 2), (3, 1)]),
[(3, 1), (1, 2), (2, 3)])
test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
[(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
|
the-stack_106_19594
|
import unittest
from torpido.pmpi import Communication
class PmpiTest(unittest.TestCase):
def test_communication(self):
sent = {"mydata", 200}
def my_function(data):
self.assertEqual(data, sent)
comm = Communication()
comm.register("ID", my_function)
sender = comm.sender()
sender.send("ID", sent)
comm.end()
def test_dtype(self):
sent = {"mydata", 200}
def my_function(data):
self.assertEqual(dict, type(data))
comm = Communication()
comm.register("ID", my_function)
comm.unregister("ID")
sender = comm.sender()
sender.send("ID", sent)
comm.end()
def test_multiple_id(self):
sent = {"mydata", 200}
def my_function(data):
self.assertEqual(dict, type(data))
comm = Communication()
comm.register("ID", my_function)
comm.register("IDE", my_function)
sender = comm.sender()
from time import sleep
sleep(0.1)
sender.send("ID", sent)
sender.send("IDE", sent)
comm.end()
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19596
|
import pandas
import xlwt
from srblib import abs_path
def excel_to_data(inp_path):
inp_path = abs_path(inp_path)
raw_data = pandas.read_excel(inp_path)
header = list(raw_data.columns)
if(len(header) == 0):
return []
temp_data = []
for head in header:
col = list(raw_data[head])
temp_data.append(col)
data = [header]
for i in range(len(temp_data[0])):
row = []
for j in range(len(header)):
row.append(temp_data[j][i])
data.append(row)
return data
def data_to_excel(data,out_path):
out_path = abs_path(out_path)
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Sheet1")
for i in range(len(data)):
row = data[i]
for j in range(len(row)):
sheet.write(i,j,row[j])
book.save(out_path)
data = excel_to_data('excel.xlsx')
print(data)
data_to_excel(data,'output.xlsx')
data = excel_to_data('output.xlsx')
print(data)
|
the-stack_106_19597
|
from statsmodels.compat.python import (lrange, iterkeys, iteritems, lzip,
reduce, itervalues, zip, string_types,
range)
from collections import OrderedDict
import datetime
import re
import textwrap
import numpy as np
import pandas as pd
from .table import SimpleTable
from .tableformatting import fmt_latex, fmt_txt
class Summary(object):
def __init__(self):
self.tables = []
self.settings = []
self.extra_txt = []
self.title = None
self._merge_latex = False
def __str__(self):
return self.as_text()
def __repr__(self):
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_df(self, df, index=True, header=True, float_format='%.4f',
align='r'):
'''Add the contents of a DataFrame to summary table
Parameters
----------
df : DataFrame
header: bool
Reproduce the DataFrame column labels in summary table
index: bool
Reproduce the DataFrame row labels in summary table
float_format: string
Formatting to float data columns
align : string
Data alignment (l/c/r)
'''
settings = {'index': index, 'header': header,
'float_format': float_format, 'align': align}
self.tables.append(df)
self.settings.append(settings)
def add_array(self, array, align='r', float_format="%.4f"):
'''Add the contents of a Numpy array to summary table
Parameters
----------
array : numpy array (2D)
float_format: string
Formatting to array if type is float
align : string
Data alignment (l/c/r)
'''
table = pd.DataFrame(array)
self.add_df(table, index=False, header=False,
float_format=float_format, align=align)
def add_dict(self, d, ncols=2, align='l', float_format="%.4f"):
'''Add the contents of a Dict to summary table
Parameters
----------
d : dict
Keys and values are automatically coerced to strings with str().
Users are encouraged to format them before using add_dict.
ncols: int
Number of columns of the output table
align : string
Data alignment (l/c/r)
'''
keys = [_formatter(x, float_format) for x in iterkeys(d)]
vals = [_formatter(x, float_format) for x in itervalues(d)]
data = np.array(lzip(keys, vals))
if data.shape[0] % ncols != 0:
pad = ncols - (data.shape[0] % ncols)
data = np.vstack([data, np.array(pad * [['', '']])])
data = np.split(data, ncols)
data = reduce(lambda x, y: np.hstack([x, y]), data)
self.add_array(data, align=align)
def add_text(self, string):
'''Append a note to the bottom of the summary table. In ASCII tables,
the note will be wrapped to table width. Notes are not indendented.
'''
self.extra_txt.append(string)
def add_title(self, title=None, results=None):
'''Insert a title on top of the summary table. If a string is provided
in the title argument, that string is printed. If no title string is
provided but a results instance is provided, statsmodels attempts
to construct a useful title automatically.
'''
if isinstance(title, string_types):
self.title = title
else:
try:
model = results.model.__class__.__name__
if model in _model_types:
model = _model_types[model]
self.title = 'Results: ' + model
except:
self.title = ''
def add_base(self, results, alpha=0.05, float_format="%.4f", title=None,
xname=None, yname=None):
'''Try to construct a basic summary instance.
Parameters
----------
results : Model results instance
alpha : float
significance level for the confidence intervals (optional)
float_formatting: string
Float formatting for summary of parameters (optional)
title : string
Title of the summary table (optional)
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
'''
param = summary_params(results, alpha=alpha, use_t=results.use_t)
info = summary_model(results)
if xname is not None:
param.index = xname
if yname is not None:
info['Dependent Variable:'] = yname
self.add_dict(info, align='l')
self.add_df(param, float_format=float_format)
self.add_title(title=title, results=results)
def as_text(self):
'''Generate ASCII Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
extra_txt = self.extra_txt
pad_col, pad_index, widest = _measure_tables(tables, settings)
rule_equal = widest * '='
#TODO: this isn't used anywhere?
rule_dash = widest * '-'
simple_tables = _simple_tables(tables, settings, pad_col, pad_index)
tab = [x.as_text() for x in simple_tables]
tab = '\n'.join(tab)
tab = tab.split('\n')
tab[0] = rule_equal
tab.append(rule_equal)
tab = '\n'.join(tab)
if title is not None:
title = title
if len(title) < widest:
title = ' ' * int(widest/2 - len(title)/2) + title
else:
title = ''
txt = [textwrap.wrap(x, widest) for x in extra_txt]
txt = ['\n'.join(x) for x in txt]
txt = '\n'.join(txt)
out = '\n'.join([title, tab, txt])
return out
def as_html(self):
'''Generate HTML Summary Table
'''
tables = self.tables
settings = self.settings
#TODO: this isn't used anywhere
title = self.title
simple_tables = _simple_tables(tables, settings)
tab = [x.as_html() for x in simple_tables]
tab = '\n'.join(tab)
return tab
def as_latex(self):
'''Generate LaTeX Summary Table
'''
tables = self.tables
settings = self.settings
title = self.title
if title is not None:
title = '\\caption{' + title + '}'
else:
title = '\\caption{}'
simple_tables = _simple_tables(tables, settings)
tab = [x.as_latex_tabular() for x in simple_tables]
tab = '\n\\hline\n'.join(tab)
to_replace = ('\\\\hline\\n\\\\hline\\n\\\\'
'end{tabular}\\n\\\\begin{tabular}{.*}\\n')
if self._merge_latex:
# create single tabular object for summary_col
tab = re.sub(to_replace,r'\\midrule\n\\midrule\n', tab)
out = '\\begin{table}', title, tab, '\\end{table}'
out = '\n'.join(out)
return out
def _measure_tables(tables, settings):
'''Compare width of ascii tables in a list and calculate padding values.
We add space to each col_sep to get us as close as possible to the
width of the largest table. Then, we add a few spaces to the first
column to pad the rest.
'''
simple_tables = _simple_tables(tables, settings)
tab = [x.as_text() for x in simple_tables]
length = [len(x.splitlines()[0]) for x in tab]
len_max = max(length)
pad_sep = []
pad_index = []
for i in range(len(tab)):
nsep = max(tables[i].shape[1] - 1, 1)
pad = int((len_max - length[i]) / nsep)
pad_sep.append(pad)
len_new = length[i] + nsep * pad
pad_index.append(len_max - len_new)
return pad_sep, pad_index, max(length)
# Useful stuff
_model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'NBin': 'Negative binomial model',
'GLM' : 'Generalized linear model'
}
def summary_model(results):
'''Create a dict with information about the model
'''
def time_now(*args, **kwds):
now = datetime.datetime.now()
return now.strftime('%Y-%m-%d %H:%M')
info = OrderedDict()
info['Model:'] = lambda x: x.model.__class__.__name__
    info['Model Family:'] = lambda x: x.family.__class__.__name__
info['Link Function:'] = lambda x: x.family.link.__class__.__name__
info['Dependent Variable:'] = lambda x: x.model.endog_names
info['Date:'] = time_now
info['No. Observations:'] = lambda x: "%#6d" % x.nobs
info['Df Model:'] = lambda x: "%#6d" % x.df_model
info['Df Residuals:'] = lambda x: "%#6d" % x.df_resid
info['Converged:'] = lambda x: x.mle_retvals['converged']
info['No. Iterations:'] = lambda x: x.mle_retvals['iterations']
info['Method:'] = lambda x: x.method
info['Norm:'] = lambda x: x.fit_options['norm']
info['Scale Est.:'] = lambda x: x.fit_options['scale_est']
info['Cov. Type:'] = lambda x: x.fit_options['cov']
rsquared_type = '' if results.k_constant else ' (uncentered)'
info['R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared
info['Adj. R-squared' + rsquared_type + ':'] = lambda x: "%#8.3f" % x.rsquared_adj
info['Pseudo R-squared:'] = lambda x: "%#8.3f" % x.prsquared
info['AIC:'] = lambda x: "%8.4f" % x.aic
info['BIC:'] = lambda x: "%8.4f" % x.bic
info['Log-Likelihood:'] = lambda x: "%#8.5g" % x.llf
info['LL-Null:'] = lambda x: "%#8.5g" % x.llnull
info['LLR p-value:'] = lambda x: "%#8.5g" % x.llr_pvalue
info['Deviance:'] = lambda x: "%#8.5g" % x.deviance
info['Pearson chi2:'] = lambda x: "%#6.3g" % x.pearson_chi2
info['F-statistic:'] = lambda x: "%#8.4g" % x.fvalue
info['Prob (F-statistic):'] = lambda x: "%#6.3g" % x.f_pvalue
info['Scale:'] = lambda x: "%#8.5g" % x.scale
out = OrderedDict()
for key, func in iteritems(info):
try:
out[key] = func(results)
# NOTE: some models don't have loglike defined (RLM), so that's NIE
except (AttributeError, KeyError, NotImplementedError):
pass
return out
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, float_format="%.4f"):
'''create a summary table of parameters from results instance
Parameters
----------
    results : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
float_format : string
float formatting options (e.g. ".3g")
Returns
-------
params_table : SimpleTable instance
'''
if isinstance(results, tuple):
results, params, bse, tvalues, pvalues, conf_int = results
else:
params = results.params
bse = results.bse
tvalues = results.tvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
data = np.array([params, bse, tvalues, pvalues]).T
data = np.hstack([data, conf_int])
data = pd.DataFrame(data)
if use_t:
data.columns = ['Coef.', 'Std.Err.', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
data.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if not xname:
try:
data.index = results.model.data.param_names
except AttributeError:
data.index = results.model.exog_names
else:
data.index = xname
return data
# Vertical summary instance for multiple models
def _col_params(result, float_format='%.4f', stars=True):
'''Stack coefficients and standard errors in single column
'''
# Extract parameters
res = summary_params(result)
# Format float
for col in res.columns[:2]:
res[col] = res[col].apply(lambda x: float_format % x)
# Std.Errors in parentheses
res.iloc[:, 1] = '(' + res.iloc[:, 1] + ')'
# Significance stars
if stars:
idx = res.iloc[:, 3] < .1
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .05
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
idx = res.iloc[:, 3] < .01
res.loc[idx, res.columns[0]] = res.loc[idx, res.columns[0]] + '*'
# Stack Coefs and Std.Errors
res = res.iloc[:, :2]
res = res.stack()
res = pd.DataFrame(res)
res.columns = [str(result.model.endog_names)]
return res
def _col_info(result, info_dict=None):
'''Stack model info in a column
'''
if info_dict is None:
info_dict = {}
out = []
index = []
for i in info_dict:
if isinstance(info_dict[i], dict):
# this is a specific model info_dict, but not for this result...
continue
try:
out.append(info_dict[i](result))
except:
out.append('')
index.append(i)
out = pd.DataFrame({str(result.model.endog_names): out}, index=index)
return out
def _make_unique(list_of_names):
if len(set(list_of_names)) == len(list_of_names):
return list_of_names
# pandas does not like it if multiple columns have the same names
from collections import defaultdict
name_counter = defaultdict(str)
header = []
for _name in list_of_names:
name_counter[_name] += "I"
header.append(_name+" " + name_counter[_name])
return header
def summary_col(results, float_format='%.4f', model_names=(), stars=False,
info_dict=None, regressor_order=(), drop_omitted=False):
"""
Summarize multiple results instances side-by-side (coefs and SEs)
Parameters
----------
results : statsmodels results instance or list of result instances
float_format : string, optional
float format for coefficients and standard errors
Default : '%.4f'
model_names : list of strings, optional
Must have same length as the number of results. If the names are not
unique, a roman number will be appended to all model names
stars : bool
print significance stars
info_dict : dict
dict of functions to be applied to results instances to retrieve
model info. To use specific information for different models, add a
(nested) info_dict with model name as the key.
Example: `info_dict = {"N":..., "R2": ..., "OLS":{"R2":...}}` would
only show `R2` for OLS regression models, but additionally `N` for
all other results.
Default : None (use the info_dict specified in
result.default_model_infos, if this property exists)
regressor_order : list of strings, optional
list of names of the regressors in the desired order. All regressors
not specified will be appended to the end of the list.
drop_omitted : bool, optional
Includes regressors that are not specified in regressor_order. If False,
regressors not specified will be appended to end of the list. If True,
only regressors in regressors_list will be included.
"""
if not isinstance(results, list):
results = [results]
cols = [_col_params(x, stars=stars, float_format=float_format) for x in
results]
# Unique column names (pandas has problems merging otherwise)
if model_names:
colnames = _make_unique(model_names)
else:
colnames = _make_unique([x.columns[0] for x in cols])
for i in range(len(cols)):
cols[i].columns = [colnames[i]]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
summ = reduce(merg, cols)
if regressor_order:
varnames = summ.index.get_level_values(0).tolist()
ordered = [x for x in regressor_order if x in varnames]
unordered = [x for x in varnames if x not in regressor_order + ['']]
order = ordered + list(np.unique(unordered))
f = lambda idx: sum([[x + 'coef', x + 'stde'] for x in idx], [])
summ.index = f(pd.unique(varnames))
summ = summ.reindex(f(order))
summ.index = [x[:-4] for x in summ.index]
if drop_omitted:
summ = summ.loc[regressor_order]
idx = pd.Series(lrange(summ.shape[0])) % 2 == 1
summ.index = np.where(idx, '', summ.index.get_level_values(0))
# add infos about the models.
if info_dict:
cols = [_col_info(x, info_dict.get(x.model.__class__.__name__,
info_dict)) for x in results]
else:
cols = [_col_info(x, getattr(x, "default_model_infos", None)) for x in
results]
# use unique column names, otherwise the merge will not succeed
for df, name in zip(cols, _make_unique([df.columns[0] for df in cols])):
df.columns = [name]
merg = lambda x, y: x.merge(y, how='outer', right_index=True,
left_index=True)
info = reduce(merg, cols)
dat = pd.DataFrame(np.vstack([summ, info])) # pd.concat better, but error
dat.columns = summ.columns
dat.index = pd.Index(summ.index.tolist() + info.index.tolist())
summ = dat
summ = summ.fillna('')
smry = Summary()
smry._merge_latex = True
smry.add_df(summ, header=True, align='l')
smry.add_text('Standard errors in parentheses.')
if stars:
smry.add_text('* p<.1, ** p<.05, ***p<.01')
return smry
def _formatter(element, float_format='%.4f'):
try:
out = float_format % element
except:
out = str(element)
return out.strip()
def _df_to_simpletable(df, align='r', float_format="%.4f", header=True,
index=True, table_dec_above='-', table_dec_below=None,
header_dec_below='-', pad_col=0, pad_index=0):
dat = df.copy()
dat = dat.applymap(lambda x: _formatter(x, float_format))
if header:
headers = [str(x) for x in dat.columns.tolist()]
else:
headers = None
if index:
stubs = [str(x) + int(pad_index) * ' ' for x in dat.index.tolist()]
else:
dat.iloc[:, 0] = [str(x) + int(pad_index) * ' ' for x in dat.iloc[:, 0]]
stubs = None
st = SimpleTable(np.array(dat), headers=headers, stubs=stubs,
ltx_fmt=fmt_latex, txt_fmt=fmt_txt)
st.output_formats['latex']['data_aligns'] = align
st.output_formats['txt']['data_aligns'] = align
st.output_formats['txt']['table_dec_above'] = table_dec_above
st.output_formats['txt']['table_dec_below'] = table_dec_below
st.output_formats['txt']['header_dec_below'] = header_dec_below
st.output_formats['txt']['colsep'] = ' ' * int(pad_col + 1)
return st
def _simple_tables(tables, settings, pad_col=None, pad_index=None):
simple_tables = []
float_format = '%.4f'
if pad_col is None:
pad_col = [0] * len(tables)
if pad_index is None:
pad_index = [0] * len(tables)
for i, v in enumerate(tables):
index = settings[i]['index']
header = settings[i]['header']
align = settings[i]['align']
simple_tables.append(_df_to_simpletable(v, align=align,
float_format=float_format,
header=header, index=index,
pad_col=pad_col[i],
pad_index=pad_index[i]))
return simple_tables
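if __name__ == '__main__':
    # Minimal usage sketch (illustrative values only): build a Summary from a
    # dict and a small array, then render it as plain text.
    smry = Summary()
    smry.add_title('Example summary')
    smry.add_dict({'Observations:': '100', 'Method:': 'OLS'})
    smry.add_array(np.array([[1.5, 0.2], [2.0, 0.3]]))
    print(smry.as_text())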
|
the-stack_106_19598
|
import copy
import torchvision.models as models
from ptsemseg.models.fcn import fcn8s, fcn16s, fcn32s
from ptsemseg.models.segnet import segnet
from ptsemseg.models.unet import unet
from ptsemseg.models.pspnet import pspnet
from ptsemseg.models.icnet import icnet
from ptsemseg.models.linknet import linknet
from ptsemseg.models.frrn import frrn
from ptsemseg.models.tlcnet import TLCNet, TLCNetU, TLCNetUmux, TLCNetUtlc, TLCNetUtlcmux
def get_model(model_dict, n_maxdisp=256, n_classes=1, version=None):
name = model_dict["arch"]
model = _get_model_instance(name)
param_dict = copy.deepcopy(model_dict)
param_dict.pop("arch")
if name in ["frrnA", "frrnB"]:
model = model(n_classes, **param_dict)
elif name in ["fcn32s", "fcn16s", "fcn8s"]:
model = model(n_classes=n_classes, **param_dict)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)
elif name == "segnet":
model = model(n_classes=n_classes, **param_dict)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)
elif name == "unet":
model = model(n_classes=n_classes, **param_dict)
elif name == "pspnet":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnet":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnetBN":
model = model(n_classes=n_classes, **param_dict)
elif name == "tlcnet":
model = model(maxdisp=n_maxdisp, **param_dict)
elif name == "tlcnetu":
model = model(n_classes=n_classes, **param_dict)
elif name=="tlcnetumux": # 2020.10.3 add
model = model(n_classes=n_classes, **param_dict)
elif name=="tlcnetutlc": # 2020.10.3 add
model = model(n_classes=n_classes, **param_dict)
elif name=="tlcnetutlcmux": # 2020.10.5 add
model = model(n_classes=n_classes, **param_dict)
else:
model = model(n_classes=n_classes, **param_dict)
return model
def _get_model_instance(name):
try:
return {
"fcn32s": fcn32s,
"fcn8s": fcn8s,
"fcn16s": fcn16s,
"unet": unet,
"segnet": segnet,
"pspnet": pspnet,
"icnet": icnet,
"icnetBN": icnet,
"linknet": linknet,
"frrnA": frrn,
"frrnB": frrn,
"tlcnet": TLCNet,
"tlcnetu": TLCNetU,
"tlcnetumux": TLCNetUmux,
"tlcnetutlc": TLCNetUtlc,
"tlcnetutlcmux": TLCNetUtlcmux
}[name]
    except KeyError:
        raise ValueError("Model {} not available".format(name))
|
the-stack_106_19599
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2017
Gabriel Pacheco <[email protected]>
Guilherme Sousa <[email protected]>
Joao Paulo Bastos <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import struct
import utils
# Logging setup
logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s][%(levelname)s] %(message)s",
datefmt="%m-%d-%Y %I:%M:%S %p")
"""
| ===================================================================
| read_input_file: gets a list of services from input_file
| ===================================================================
"""
def read_input_file(input_file):
services = dict()
with open(input_file) as input_file:
for line in input_file.readlines():
line = ''.join(line).strip(' ')
if line != "#" and not line.isspace():
splitted_line = line.replace('\t', ' ').replace(' ', ' ').split()
service_key = splitted_line[0] # Extracts service name
services[service_key] = " ".join(splitted_line[1:]) # Service port, protocol and any more info
return services
"""
| ===================================================================
| local_db_search: takes a key searches for it in local storage
| ===================================================================
"""
def local_db_search(srv_sock, service_list, recv_message, ip_addr):
logger = logging.getLogger(__name__)
# Check if key is locally stored
if recv_message in service_list:
# Prepare response
send_header = struct.pack(utils.MESSAGE_FORMAT["RESPONSE"], utils.MESSAGE_TYPES["RESPONSE"])
send_message = send_header + recv_message + '\t' + service_list[recv_message] + '\x00\x00'
try:
srv_sock.sendto(send_message, (ip_addr[0], ip_addr[1]))
logger.info("Answer sent successfully to %s:%d", ip_addr[0], ip_addr[1])
except:
pass
finally:
return True
else:
return False
"""
| ===================================================================
| forward_query: takes a query and forward it to other peers
| ===================================================================
"""
def forward_query(srv_sock, recv_data, recv_header_size, ttl, from_addr, exclude_list, seq, other_peers):
logger = logging.getLogger(__name__)
# Prepare forward query
send_header = struct.pack(utils.MESSAGE_FORMAT["QUERY"], utils.MESSAGE_TYPES["QUERY"], ttl,
utils.ip_to_int(from_addr[0]), from_addr[1], seq)
send_message = send_header + recv_data[recv_header_size:]
for peer in other_peers:
# As opt args are stored as string, here we convert peer port to int in order
# to properly forward query message
peer = tuple([peer.split(":")[0], int(peer.split(":")[1])])
# print ("%s:%d => %s:%d" % (peer[0], int(peer[1]), peer_addr[0], int(peer_addr[1])))
if peer not in exclude_list:
try:
srv_sock.sendto(send_message, peer)
logger.info("Query forwarded successfully to %s", peer)
except:
pass
|
the-stack_106_19603
|
from sys import maxsize
from networkx.algorithms.shortest_paths.weighted \
import dijkstra_predecessor_and_distance
from domino_puzzle import BoardGraph, BadPositionError, find_boards_with_deap
class BlockingBoardGraph(BoardGraph):
def walk(self, board, size_limit=maxsize):
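        # Walk every reachable state, then use Dijkstra distances from the
        # starting display to pick the farthest state as the new start.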
states = super().walk(board, size_limit=size_limit)
_preds, distances = dijkstra_predecessor_and_distance(self.graph,
board.display())
_dist, state = max((d, s) for s, d in distances.items())
self.last = self.start
self.start = state
return states
def move(self, domino, dx, dy):
""" Move a domino and calculate the new board state.
Afterward, put the board back in its original state.
@return: the new board state and remaining moves
@raise BadPositionError: if the move is illegal
"""
remaining = 1 # ignored for this puzzle
domino.move(dx, dy)
try:
board = domino.head.board
if not board.is_connected():
raise BadPositionError('Board is not connected.')
if board.hasMatch():
raise BadPositionError('Board has a match.')
return board.display(cropped=True), remaining
finally:
domino.move(-dx, -dy)
def main():
find_boards_with_deap(graph_class=BlockingBoardGraph)
if __name__ == '__main__':
main()
|
the-stack_106_19605
|
import time
from test.querybuildertestcase import QueryBuilderTestCase
from selenium.webdriver.common.alert import Alert
class QueryHistoryTest(QueryBuilderTestCase):
def test_query_history(self):
self.load_queries_into_history()
time.sleep(3)
self.assertIn('Custom query', self.browser.title)
self.assertEquals(2, len(self.elems('#modifyQueryForm tbody tr')))
self.assertEquals('query_2', self.elem('#modifyQueryForm tbody tr:nth-child(2) td:nth-child(2)').text)
root = self.elem('#modifyQueryForm tbody tr:nth-child(2) .historySummaryRoot').text
self.assertEquals('Bank', root)
showing = self.elems('#modifyQueryForm tbody tr:nth-child(2) .historySummaryShowing')
self.assertEquals(2, len(showing))
self.assertEquals(['Name', 'Debt'], [s.text for s in showing])
def test_delete_query_from_history(self):
self.load_queries_into_history()
self.assertEquals(2, len(self.elems('#modifyQueryForm tbody tr')))
self.elem('#selected_history_1').click()
self.elem('#delete_button').click()
Alert(self.browser).accept()
self.assertEquals(1, len(self.elems('#modifyQueryForm tbody tr')))
def load_queries_into_history(self):
query_1 = ''.join([
'<query model="testmodel" view="Bank.debtors.debt" sortOrder="Bank.debtors.debt asc">',
'</query>'
])
query_2 = ''.join([
'<query model="testmodel" view="Bank.name Bank.debtors.debt" sortOrder="Bank.debtors.debt asc">',
'<constraint path="Bank.debtors.debt" op=">" value="35,000,000"/>',
'</query>'
])
# Load queries into session history.
for q in [query_1, query_2]:
self.browser.get(self.base_url + '/customQuery.do')
self.findLink("Import query from XML").click()
self.elem('#xml').send_keys(q)
self.elem('#importQueriesForm input[type="submit"]').click()
self.elem('#showResult').click()
self.browser.get(self.base_url + '/customQuery.do')
def test_run_query_in_query_history(self):
self.load_queries_into_history()
self.elem('#modifyQueryForm tbody tr:nth-child(2) td:nth-child(7) span.fakelink:nth-child(1)').click()
self.assertRowCountIs(16)
def test_edit_query_in_query_history(self):
self.load_queries_into_history()
self.elem('#modifyQueryForm tbody tr:nth-child(2) td:nth-child(7) span.fakelink:nth-child(2)').click()
time.sleep(3)
self.assertIn('Query builder', self.browser.title)
self.assertEquals('Bank', self.elem('.typeSelected').text)
# Edit a constraint.
self.elem('img[title="Edit this constraint"]').click()
con_value = self.elem('#attribute8')
con_value.clear()
con_value.send_keys('40,000,000')
self.elem('#attributeSubmit').click()
# Check results.
self.elem('#showResult').click()
self.assertRowCountIs(15)
def test_export_query_in_query_history(self):
self.load_queries_into_history()
expected_query = '\n'.join([
' '.join([
'<query',
'name="query_2"',
'model="testmodel"',
'view="Bank.name Bank.debtors.debt"',
'longDescription=""',
'sortOrder="Bank.debtors.debt asc">'
]),
' <constraint path="Bank.debtors.debt" op=">" value="35,000,000"/>',
'</query>'])
self.elem('#modifyQueryForm tbody tr:nth-child(2) td:nth-child(7) span.fakelink:nth-child(3)').click()
self.assertEquals(expected_query, self.elem('body').text)
|
the-stack_106_19607
|
#!/usr/bin/env python3
import bs4
import requests
url = "https://github.com/trending?l=Python"
soup = bs4.BeautifulSoup(requests.get(url).content, "lxml") # or 'html5lib'
repos = soup.find("ol", class_="repo-list").find_all("a", href=True)
repos = (r.text.strip().replace(" ", "") for r in repos if "/" in r.text)
print("\n".join(repos))
|
the-stack_106_19608
|
"""Test methods for `zcode/inout/timer.py`.
Can be run with:
$ nosetests inout/tests/test_timer.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import xrange
from numpy.testing import run_module_suite
import numpy as np
# from nose.tools import assert_true, assert_false, assert_equal
class TestTimer(object):
@classmethod
def setup_class(cls):
cls.NUM_ITER = 10
pass
@classmethod
def teardown_class(cls):
pass
def test_timer(self):
from zcode.inout.timer import Timings
# Create `Timer` object
times = Timings()
for ii in xrange(self.NUM_ITER):
times.start('one')
np.random.randint(-1000, 1000, size=1000000)
times.stop('one')
times.start('two')
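            # CPU-bound workload for the 'two' timer: a sieve over the odd
            # numbers 3..NUM, zeroing out odd multiples of each surviving value
            # so the nonzero entries that remain are the odd primes up to NUM.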
NUM = 200
ss = np.arange(3, NUM+1, 2)
mroot = NUM ** 0.5
half = (NUM + 1)//2 - 1
ii = 0
mm = 3
while mm <= mroot:
if ss[ii]:
jj = np.int((mm * mm - 3)/2)
ss[jj] = 0
while jj < half:
ss[jj] = 0
jj += mm
ii += 1
mm = 2*ii + 3
times.stop('two')
times.start('three')
np.sort(np.random.permutation(np.arange(1000000)))
times.stop('three')
# for ii in xrange(len(times)):
# names = times.names()
# print(names[ii])
# for jj in times.durations[ii]:
# print(jj, end=' ')
# print("\n")
#
# print("Averages = ", times.average())
times.report()
# Run all methods as if with `nosetests ...`
if __name__ == "__main__":
run_module_suite()
|
the-stack_106_19609
|
"""add application draft status
Revision ID: 5a4b8a4896fb
Revises: a2327cf14296
Create Date: 2022-05-29 01:14:04.196440+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "5a4b8a4896fb"
down_revision = "a2327cf14296"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"applications",
sa.Column(
"draft_status",
sa.Enum("PENDING", "REJECTED", "ACCEPTED", name="status"),
server_default="PENDING",
nullable=False,
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("applications", "draft_status")
# ### end Alembic commands ###
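    # Note: on PostgreSQL, dropping the column does not drop the ENUM type that
    # upgrade() created; a full cleanup would also need something like
    # sa.Enum(name="status").drop(op.get_bind(), checkfirst=True).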
|
the-stack_106_19610
|
from setuptools import setup
with open("README.md", "r") as f:
long_description = f.read()
setup(
name="qwhale_client", # How you named your package folder
packages=["qwhale_client"], # Chose the same as "name"
include_package_data=True,
version="v0.1.20", # Start with a small number and increase it with every change you make
license="MIT", # Chose a license from here: https://help.github.com/articles/licensing-a-repository
description="Python client for Qwhale API", # Give a short description about your library
long_description=long_description,
long_description_content_type="text/markdown",
author="Yehoyada.s", # Type in your name
author_email="[email protected]", # Type in your E-Mail
url="https://qwhale.ml", # Provide either the link to your github or to your website
download_url="https://github.com/hvuhsg/qwhale_client/archive/0.1.2.tar.gz",
keywords=[
"API",
"Client",
"Qwhale",
"QWhale",
"client",
"storage",
"MongoDB",
], # Keywords that define your package best
install_requires=["pymongo", "requests"], # I get to this in a second
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers", # Define that your audience are developers
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License", # Again, pick a license
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
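# Typical build-and-publish flow for this file (standard setuptools usage, not
# part of the package itself):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*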
|
the-stack_106_19612
|
"""Recursive Policy Gradients."""
import os
import sys
import numpy as np
import tensorflow as tf
from ray import tune
from . import utils as U
from .meta import make_with_custom_variables
def stop_forward(x):
"""Implements the Magic Box operator."""
with tf.name_scope("stop_forward"):
op = tf.exp(x - tf.stop_gradient(x))
return op
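# The Magic Box trick: exp(x - stop_gradient(x)) evaluates to 1 in the forward
# pass, while its gradient w.r.t. x is the op itself (also 1 numerically), so
# multiplying a return R by stop_forward(sum_of_log_probs) leaves the value of
# R unchanged but lets gradients flow through the log-probabilities, which is
# what the DiCE estimator relies on.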
def build_policy(env, make_policy, scope, reuse=None, prev=None):
"""Creates and builds a new policy."""
print("env.ob_space_shape", env.ob_space_shape)
pi = make_policy(
env.ob_space_shape,
env.NUM_ACTIONS,
prev=prev,
batch_size=env.batch_size,
)
pi.build(scope, reuse=reuse)
return pi
def build_losses(scope, policies, use_baseline=True, use_dice=True):
"""Builds policy and value loss tensors for the given policies.
Args:
scope (str): The name of the scope to use for internals.
policies (list): A list of Policy objects. Assumed to be built.
use_baseline (bool): A flag for whether to use a baseline for PG.
use_dice (bool): Whether to use the DiCE operator.
Returns:
policy_losses: list of <float32> [] tensors for policy losses.
value_losses: list of <float32> [] tensors for value function losses.
"""
with tf.name_scope(scope):
# Build return weights.
if use_dice:
ret_weights = stop_forward(
sum(pi.log_pi_acs_cumsum for pi in policies)
)
else:
ret_weights = sum(pi.log_pi_acs_cumsum for pi in policies)
# Build policy losses.
if use_baseline:
with tf.name_scope("baseline"):
if use_dice:
baseline_weights = 1 - stop_forward(
sum(pi.log_pi_acs for pi in policies)
)
else:
baseline_weights = -sum(pi.log_pi_acs for pi in policies)
baselines = [
tf.multiply(tf.stop_gradient(pi.vpred), pi.discount)
for pi in policies
]
policy_losses = [
-tf.reduce_mean(
tf.reduce_sum(tf.multiply(pi.rets_ph, ret_weights), axis=0)
)
- tf.reduce_mean(
tf.reduce_sum(tf.multiply(b, baseline_weights), axis=0)
)
for pi, b in zip(policies, baselines)
]
else:
rets = [
pi.rets_ph - tf.reduce_mean(pi.rets_ph, axis=1, keepdims=True)
for pi in policies
]
policy_losses = [
-tf.reduce_mean(
tf.reduce_sum(tf.multiply(ret, ret_weights), axis=0)
)
for pi, ret in zip(policies, rets)
]
# Build value function losses.
value_losses = [
tf.reduce_mean(tf.square(pi.vpred - pi.values_ph))
for pi in policies
]
return policy_losses, value_losses
def build_grads(scope, losses, params):
"""Builds gradients of the loss functions w.r.t. parameters.
Args:
scope (str): The name of the scope to use for internals.
losses (list): A list of loss tensors.
params (list): A list of (lists of) tf.Variables.
Returns:
grads: A list of gradient tensors of the losses w.r.t. params.
"""
assert len(losses) == len(params)
with tf.name_scope(scope):
grads = [
tf.gradients(loss, param) for loss, param in zip(losses, params)
]
return grads
def build_new_params(
scope, policies, k, *, lr, asymm=True, use_baseline=True, use_dice=True
):
"""Builds new parameters for each policy performing a MAML-like update.
To understand how this works, consider 3 policies with parameters
`old_params_1`, `old_params_2`, `old_params_3`. If `k == 1` and
`asymm == True`, we have:
new_params_1 = old_params_1
new_params_2 = old_params_2 - lr * grad loss_2
new_params_3 = old_params_3 - lr * grad loss_3
    If `asymm == False`, `new_params_1` will also be updated. The asymmetric
updates are used as lookahead steps performed by the LOLA agents. In the
given example, agent 1 "imagines" other agents do gradient updates with the
specified learning rate (lr); it will then backpropagate through these
    updates and update its own parameters accordingly.
Args:
scope (str): The name of the scope to use for internals.
policies (list): A list of Policy objects. Assumed to be built.
        k (int): The policy index whose parameters are NOT updated but instead
copied over.
asymm (bool): Whether to perform symmetric or asymmetric update.
use_baseline (bool): A flag for whether to use a baseline for PG.
use_dice (bool): Whether to use the DiCE operator.
"""
with tf.name_scope(scope):
# Build inner losses.
policy_losses, value_losses = build_losses(
None, policies, use_baseline=use_baseline, use_dice=use_dice
)
losses = policy_losses
# losses = [pl + vl for pl, vl in zip(policy_losses, value_losses)]
params = [pi.parameters for pi in policies]
# Build gradients.
grads = build_grads(None, losses, params)
# Build new parameters.
new_params = []
for i, (pi, grad) in enumerate(zip(policies, grads)):
if (i != k) or (not asymm):
new_p = [
(p - lr * g) if g is not None else p
for p, g in zip(pi.parameters, grad)
]
else:
new_p = pi.root.parameters
new_params.append(new_p)
return new_params
def get_update(policies, losses, update_ops, sess, gamma=0.96):
"""Creates an update function.
Args:
policies (list): A list of Policy objects. Assumed to be built.
Used to construct the `feed_dict` for the `sess.run`.
losses (list): A list of <float32> [] tensors values for which will be
computed and returned.
update_ops (list): A list of update ops.
sess (tf.Session): A tf.Session instance that will be used for running.
gamma (float): The discount factor that will be fed into the graph.
TODO: perhaps move this argument somewhere else?
"""
def update(traces, *, parent_traces=[]):
feed_list = sum(
[
pi.get_feed_list(trace) + [(pi.gamma_ph, [[gamma]])]
for pi, trace in zip(policies, traces)
],
[],
)
# Construct the parent feed list.
parent_policies = zip(*[pi.parents for pi in policies])
parent_feed_list = sum(
[
pi.get_feed_list(trace) + [(pi.gamma_ph, [[gamma]])]
for parents, traces in zip(parent_policies, parent_traces)
for pi, trace in zip(parents, traces)
],
[],
)
# Compute.
feed_dict = dict(feed_list + parent_feed_list)
results = sess.run(losses + update_ops, feed_dict=feed_dict)
return results[: len(losses)]
return update
def compute_values(rews, last_vpreds, *, gamma, use_gae=False):
"""Compute the estimated values for the given sequence of rewards."""
# TODO: add GAE as an option.
T = len(rews)
values = [last_vpreds]
for t in reversed(range(T)):
values_t = [gamma * v + r for v, r in zip(values[-1], rews[t])]
values.append(values_t)
return list(reversed(values[1:]))
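# Worked example (hypothetical numbers): with gamma=0.5, rews=[[1.0], [2.0]] and
# last_vpreds=[0.0], the bootstrapped targets are
#   t=1: 0.5 * 0.0 + 2.0 = 2.0,   t=0: 0.5 * 2.0 + 1.0 = 2.0
# so compute_values returns [[2.0], [2.0]] (one inner list per time step).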
def rollout(
env,
policies,
rollout_policies,
sess,
*,
gamma,
parent_traces=[],
use_toolbox_env=False,
):
"""Rolls out a single batch of episode of the policies in the given environment.
To avoid quadratic time complexity of the rollout in the number time steps
for the recursively generated policies, we never use their graphs directly
for rollouts. Instead, we copy the values of the policy parameters into the
corresponding rollout policies and run those in the environment.
Args:
env (gym.Env): An instance of the environment.
policies (list): A list of Policy objects. Assumed to be built.
        rollout_policies (list): Another set of policies whose parameters are
            plain variables (not functions of other policies and rollouts).
sess (tf.Session): A tf.Session instance that will be used for running.
gamma (float): The discount factor that will be fed into the graph.
parent_traces (list): A list of traces that are fed into the
corresponding placeholders if the parameters of the policies depend
on other (parent) policies.
Returns:
trace: A list of obs, acs, rets, values, infos.
"""
obs, acs, rets, rews, values, infos = [], [], [], [], [], []
to_log = []
# Construct the parent feed list.
parent_policies = zip(*[pi.parents for pi in policies])
parent_feed_list = sum(
[
pi.get_feed_list(trace) + [(pi.gamma_ph, [[gamma]])]
for parents, traces in zip(parent_policies, parent_traces)
for pi, trace in zip(parents, traces)
],
[],
)
# Cache parameters and push them into rollout policies.
assign_ops = [
tf.assign(pr, p)
for pi, pi_roll in zip(policies, rollout_policies)
for p, pr in zip(pi.parameters, pi_roll.parameters)
]
sess.run(assign_ops, feed_dict=dict(parent_feed_list))
# Roll out
if use_toolbox_env:
raise NotImplementedError()
obs = env.reset()
ob = obs["player_red"]
else:
ob, all_info = env.reset()
info = all_info.pop("available_actions")
done = False
gamma_t = 1.0
t = 0
while not done:
obs.append(ob)
infos.append(info)
ac = [
pi.act(o, i, sess) for pi, o, i in zip(rollout_policies, ob, info)
]
ob, rew, done, all_info = env.step(ac)
acs.append(ac)
rews.append(rew)
rets.append([r * gamma_t for r in rew])
gamma_t *= gamma
t += 1
info = all_info.pop("available_actions")
to_log.append(all_info)
# Adjust rets and compute value estimates
last_vpreds = [
pi.predict(o, sess) * 0 for pi, o in zip(rollout_policies, ob)
]
# for k, last_vpred in enumerate(last_vpreds):
# rets[-1][k] += gamma_t * last_vpred
values = compute_values(rews, last_vpreds, gamma=gamma)
obs = list(map(np.asarray, zip(*obs)))
acs = list(map(np.asarray, zip(*acs)))
rets = list(map(np.asarray, zip(*rets)))
values = list(map(np.asarray, zip(*values)))
infos = list(map(np.asarray, zip(*infos)))
trace = list(zip(obs, acs, rets, values, infos))
return trace, to_log
def gen_trace_batches(trace, *, batch_size):
"""Splits the trace and yields batches."""
obs, acs, rets, values, infos = zip(*trace)
permutation = np.random.permutation(len(obs[0]))
for i in range(0, len(obs[0]), batch_size):
idx = permutation[i : i + batch_size]
trace_batch = list(
zip(
[ob[idx] for ob in obs],
[ac[idx] for ac in acs],
[ret[idx] for ret in rets],
[val[idx] for val in values],
[info[idx] for info in infos],
)
)
yield trace_batch
def build_graph(
env,
make_policy,
make_optimizer,
*,
lr_inner=1.0, # lr for the inner loop steps
lr_outer=1.0, # lr for the outer loop steps
lr_value=0.1, # lr for the value function estimator
lr_om=1.0, # lr for opponent modeling
n_agents=2,
n_inner_steps=1,
inner_asymm=True,
use_baseline=True,
use_dice=True,
use_opp_modeling=False,
batch_size=64,
):
"""Builds all components of the graph."""
# Root policies.
print("Building root policies...", end="")
sys.stdout.flush()
root_policies = []
for k in range(n_agents):
pi = build_policy(env, make_policy, "root/pi_%d" % k)
root_policies.append(pi)
print("Done.")
# Opponent models.
if use_opp_modeling:
for k, pi in enumerate(root_policies):
pi.opponents = [
build_policy(env, make_policy, "root/pi_%d/opp_%d" % (k, j))
for j in range(n_agents - 1)
]
else:
for k, pi in enumerate(root_policies):
pi.opponents = [
make_with_custom_variables(
lambda: build_policy(
env,
make_policy,
"root/pi_%d/opp_%d" % (k, j - (j > k)),
),
opp.parameters,
)
for j, opp in enumerate(root_policies)
if j != k
]
# Rollout policies (used to speed up rollouts).
print("Building rollout policies...", end="")
sys.stdout.flush()
rollout_policies = []
for k in range(n_agents):
pi = build_policy(env, make_policy, "rollout/pi_%d" % k)
rollout_policies.append(pi)
print("Done.")
# Build asymmetric inner loops recursively.
print("Building asymmetric inner loops...", end="")
sys.stdout.flush()
policies = root_policies
for m in range(n_inner_steps):
new_policies = []
for k in range(n_agents):
# Build new parameters.
new_params, *new_params_opp = build_new_params(
"inner_%d/params_%d" % (m + 1, k),
[policies[k]] + policies[k].opponents,
0,
lr=lr_inner,
asymm=inner_asymm,
use_baseline=use_baseline,
use_dice=use_dice,
)
# Build new policy and opponents.
new_policy = make_with_custom_variables(
lambda: build_policy(
env,
make_policy,
"inner_%d/pi_%d" % (m + 1, k),
prev=policies[k],
),
new_params,
)
new_policy.opponents = [
make_with_custom_variables(
lambda: build_policy(
env,
make_policy,
"inner_%d/pi_%d/opp_%d" % (m + 1, k, i),
prev=prev_opp,
),
opp_params,
)
for i, (opp_params, prev_opp) in enumerate(
zip(new_params_opp, policies[k].opponents)
)
]
new_policies.append(new_policy)
policies = new_policies
print("%d..." % (m + 1), end="")
sys.stdout.flush()
print("Done.")
# Build the outer loop.
print("Building the outer loop...", end="")
sys.stdout.flush()
pol_losses, val_losses = [], []
update_pol_ops, update_val_ops = [], []
for k in range(n_agents):
params = policies[k].root.parameters
pol_loss, val_loss = build_losses(
"outer_%d" % k,
[policies[k]] + policies[k].opponents,
use_baseline=use_baseline,
use_dice=use_dice,
)
pol_losses.append([pol_loss[0]])
val_losses.append([val_loss[0]])
opt_pol = make_optimizer(lr=lr_outer)
opt_val = make_optimizer(lr=lr_value)
upd_pol = [opt_pol.minimize(pol_loss[0], var_list=params)]
upd_val = [opt_val.minimize(val_loss[0], var_list=params)]
update_pol_ops.append(upd_pol)
update_val_ops.append(upd_val)
print("Done.")
# Build opponent modeling.
om_losses = []
update_om_ops = []
if use_opp_modeling:
for k in range(n_agents):
opp_models = policies[k].root.opponents
true_opponents = [
pi.root for j, pi in enumerate(policies) if j != k
]
losses = [-tf.reduce_mean(opp.log_pi_acs) for opp in opp_models]
params = [opp.parameters for opp in opp_models]
opts = [
make_optimizer(lr=lr_om) for opp in policies[k].root.opponents
]
upds = [
opt.minimize(loss, var_list=param)
for opt, loss, param in zip(opts, losses, params)
]
om_losses.append(losses)
update_om_ops.append(upds)
return (
policies,
rollout_policies,
pol_losses,
val_losses,
om_losses,
update_pol_ops,
update_val_ops,
update_om_ops,
)
def train(
env,
make_policy,
make_optimizer,
*,
epochs=100,
gamma=0.96,
lr_inner=1.0, # lr for the inner loop steps
lr_outer=1.0, # lr for the outer loop steps
lr_value=0.1, # lr for the value function estimator
lr_om=0.1, # lr for opponent modeling
n_agents=2,
n_inner_steps=1,
inner_asymm=True,
om_batch_size=64, # batch size used for fitting opponent models
om_epochs=5, # epochs per iteration to fit opponent models
value_batch_size=64, # batch size used for fitting the values
value_epochs=5, # epochs per iteration to fit value functions
use_baseline=True,
use_dice=True,
use_opp_modeling=False,
save_dir=".",
):
"""The main training function."""
os.makedirs(save_dir, exist_ok=True)
# Build.
tf.reset_default_graph()
(
policies,
rollout_policies,
pol_losses,
val_losses,
om_losses,
update_pol_ops,
update_val_ops,
update_om_ops,
) = build_graph(
env,
make_policy,
make_optimizer,
lr_inner=lr_inner,
lr_outer=lr_outer,
lr_value=lr_value,
lr_om=lr_om,
n_agents=n_agents,
n_inner_steps=n_inner_steps,
use_baseline=use_baseline,
use_dice=use_dice,
use_opp_modeling=use_opp_modeling,
inner_asymm=inner_asymm,
)
# Train.
acs_all = []
rets_all = []
params_all = []
params_om_all = []
times_all = []
pick_speed_all = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Construct update functions.
update_funcs = {
"policy": [
get_update(
[policies[k]] + policies[k].opponents,
pol_losses[k],
update_pol_ops[k],
sess,
gamma=gamma,
)
for k in range(n_agents)
],
"value": [
get_update(
[policies[k]],
val_losses[k],
update_val_ops[k],
sess,
gamma=gamma,
)
for k in range(n_agents)
],
"opp": [
get_update(
policies[k].root.opponents,
om_losses[k],
update_om_ops[k],
sess,
gamma=gamma,
)
for k in range(n_agents)
]
if om_losses
else None,
}
root_policies = [pi.root for pi in policies]
# Train for a number of epochs.
for e in range(epochs):
times = []
# Model opponents.
if use_opp_modeling:
with U.elapsed_timer() as om_timer:
# Fit opponent models for several epochs.
om_losses = np.zeros((n_agents, n_agents - 1))
for om_ep in range(om_epochs):
traces, _ = rollout(
env,
root_policies,
rollout_policies,
sess,
gamma=gamma,
parent_traces=[],
)
om_traces = [
[tr for j, tr in enumerate(traces) if j != k]
for k in range(n_agents)
]
for k in range(n_agents):
update_om = update_funcs["opp"][k]
for trace_batch in gen_trace_batches(
om_traces[k], batch_size=om_batch_size
):
update_om(trace_batch)
loss = update_om(om_traces[k])
om_losses[k] += np.asarray(loss)
om_losses /= om_epochs
times.append(om_timer())
else:
om_losses = np.array([])
print("start Fit function")
# Fit value functions.
with U.elapsed_timer() as val_timer:
# Fit value functions for several epochs.
value_losses = np.zeros(n_agents)
for v_ep in range(value_epochs):
traces, _ = rollout(
env,
root_policies,
rollout_policies,
sess,
gamma=gamma,
parent_traces=[],
)
for k in range(n_agents):
update_val = update_funcs["value"][k]
for trace_batch in gen_trace_batches(
[traces[k]], batch_size=value_batch_size
):
update_val(trace_batch)
loss = update_val([traces[k]])
value_losses[k] += loss[0]
value_losses /= value_epochs
times.append(val_timer())
# Save parameters of the agents (for debug purposes).
params = sess.run(
[tf.squeeze(pi.root.parameters[0]) for pi in policies]
)
params_all.append(params)
# Save parameters of the opponent models (for debug purposes).
params = [
sess.run(
[
tf.squeeze(opp.root.parameters[0])
for opp in pi.opponents
]
)
for pi in policies
]
params_om_all.append(params)
print("start Inner loops")
# Inner loop rollouts (lookahead steps).
inner_all_to_log = []
with U.elapsed_timer() as inner_timer:
inner_traces = []
for k in range(n_agents):
parent_traces = []
to_log = []
for m in range(n_inner_steps):
policies_k = [policies[k].parents[m]] + [
opp.parents[m] for opp in policies[k].opponents
]
traces, sub_to_log = rollout(
env,
policies_k,
rollout_policies,
sess,
gamma=gamma,
parent_traces=parent_traces,
)
parent_traces.append(traces)
to_log.append(sub_to_log)
inner_traces.append(parent_traces)
inner_all_to_log.append(to_log)
times.append(inner_timer())
print("start Outer loops")
# Outer loop rollouts (each agent plays against updated opponents).
outer_all_to_log = []
with U.elapsed_timer() as outer_timer:
outer_traces = []
for k in range(n_agents):
parent_traces = inner_traces[k]
policies_k = [policies[k]] + policies[k].opponents
traces, to_log = rollout(
env,
policies_k,
rollout_policies,
sess,
gamma=gamma,
parent_traces=parent_traces,
)
outer_traces.append(traces)
outer_all_to_log.append([to_log])
times.append(outer_timer())
# Updates.
update_time = 0
policy_losses = []
for k in range(n_agents):
# Policy
with U.elapsed_timer() as pol_upd_timer:
parent_traces = inner_traces[k]
update_pol = update_funcs["policy"][k]
loss = update_pol(
outer_traces[k], parent_traces=parent_traces
)
policy_losses.append(loss)
update_time += pol_upd_timer()
to_report = {}
for ag_idx in range(n_agents):
print("== For ag_idx", ag_idx, "==")
# Logging.
if n_inner_steps > 0:
# obs, acs, rets, vals, infos = list(zip(*inner_traces[0][ag_idx]))
obs, acs, rets, vals, infos = list(
zip(*inner_traces[ag_idx][0])
)
all_to_log = inner_all_to_log
else:
obs, acs, rets, vals, infos = list(
zip(*outer_traces[ag_idx])
)
all_to_log = outer_all_to_log
all_to_log = [
per_agent_to_log[0] for per_agent_to_log in all_to_log
][ag_idx]
policy_loss = policy_losses[ag_idx]
times_all.append(times)
acs_all.append([ac.mean() for ac in acs])
generate_rate_trace = [
all_to_log[i].pop("generate_rate")
for i in range(len(all_to_log))
if "generate_rate" in all_to_log[i].keys()
]
pick_speed_all.append(
sum(generate_rate_trace) / len(generate_rate_trace)
if len(generate_rate_trace) > 0
else -1
)
rets_all.append(
[r.sum(axis=0).mean() * (1 - gamma) for r in rets]
)
# rets_all.append([r.sum(axis=0).mean() for r in rets])
print("Epoch:", e + 1, "-" * 60)
# print("Policy losses:", list(map(sum, policy_losses)))
print("Value losses:", value_losses.tolist())
print("OM losses:", om_losses.tolist())
print("Returns:", rets_all[-1])
print("Defection rate:", acs_all[-1])
print("Pick speed:", pick_speed_all[-1])
# # Save stuff
# np.save(save_dir + '/acs.npy', acs_all)
# np.save(save_dir + '/rets.npy', rets_all)
# np.save(save_dir + '/params.npy', params_all)
# np.save(save_dir + '/params_om.npy', params_om_all)
# np.save(save_dir + '/times.npy', times_all)
# np.save(save_dir + '/pick_speed.npy', pick_speed_all)
info_from_env = {}
# Only keep the last info
for to_log in all_to_log:
info_from_env.update(to_log)
initial_info = {
"returns_player_1": rets_all[-1][0],
"returns_player_2": rets_all[-1][1],
"defection_rate_player_1": acs_all[-1][0],
"defection_rate_player_2": acs_all[-1][1],
"pick_speed_global": pick_speed_all[-1],
"policy_loss": policy_loss,
}
for k, v in info_from_env.items():
to_report[f"ag_{ag_idx}_{k}"] = v
for k, v in initial_info.items():
to_report[f"ag_{ag_idx}_{k}"] = v
tune.report(**to_report)
|
the-stack_106_19614
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import numpy as np
from openmdao.api import Component
'''
Component which combines the mass design variables with the masses of the points not defined as design variables to create a single input vector for the Nastran components
'''
class MixedInputDesvarM(Component):
def __init__(self, mn, m_desvar_list=[]):
super(MixedInputDesvarM, self).__init__()
#Number of concentrated masses
self.mn = mn
#List containing the indices of the thickness vector defined as design variables
self.m_desvar_list = m_desvar_list
#Vector containing the baseline or default concentrated masses' values
self.add_param('m_indep', val=np.zeros(self.mn))
#Vector containing concentrated masses design variables
self.add_param('m_desvar', val=np.zeros(len(m_desvar_list)))
#Vector containing the concentrated masses' values
self.add_output('m', val=np.zeros(self.mn))
def solve_nonlinear(self, params, unknowns, resids):
m_desvar_list = self.m_desvar_list
m_indep = params['m_indep']
m_desvar = params['m_desvar']
        #Copy so the 'm_indep' input array is not mutated in place
        m = m_indep.copy()
        #Substitute the design variables to build the mass vector that is the input of the Nastran components
for i in range(len(m_desvar_list)):
m[m_desvar_list[i]] = m_desvar[i]
unknowns['m'] = m
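#Usage sketch (hypothetical values): with mn=3 and m_desvar_list=[1],
#params 'm_indep' = [10., 20., 30.] and 'm_desvar' = [25.] yield
#unknowns['m'] = [10., 25., 30.] after solve_nonlinear.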
|
the-stack_106_19615
|
# orm/state.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from .. import util
from .. import inspection
from .. import exc as sa_exc
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF
from . import base
@inspection._self_inspects
class InstanceState(interfaces.InspectionAttr):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = ()
insert_order = None
_strong_obj = None
modified = False
expired = False
_deleted = False
_load_pending = False
_orphaned_outside_of_session = False
is_instance = True
identity_token = None
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict(
(key, AttributeState(self, key))
for key in self.manager
)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
self._attached
@property
def deleted(self):
"""Return true if the object is :term:`deleted`.
An object that is in the deleted state is guaranteed to
not be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`; however if the session's transaction is rolled
back, the object will be restored to the persistent state and
the identity map.
.. note::
The :attr:`.InstanceState.deleted` attribute refers to a specific
state of the object that occurs between the "persistent" and
"detached" states; once the object is :term:`detached`, the
:attr:`.InstanceState.deleted` attribute **no longer returns
True**; in order to detect that a state was deleted, regardless
of whether or not the object is associated with a :class:`.Session`,
use the :attr:`.InstanceState.was_deleted` accessor.
        .. versionadded:: 1.1
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached and self._deleted
@property
def was_deleted(self):
"""Return True if this object is or was previously in the
"deleted" state and has not been reverted to persistent.
This flag returns True once the object was deleted in flush.
When the object is expunged from the session either explicitly
or via transaction commit and enters the "detached" state,
this flag will continue to report True.
.. versionadded:: 1.1 - added a local method form of
:func:`.orm.util.was_deleted`.
.. seealso::
:attr:`.InstanceState.deleted` - refers to the "deleted" state
:func:`.orm.util.was_deleted` - standalone function
:ref:`session_object_states`
"""
return self._deleted
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
An object that is in the persistent state is guaranteed to
be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`.
.. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
accessor no longer returns True for an object that was
"deleted" within a flush; use the :attr:`.InstanceState.deleted`
accessor to detect this state. This allows the "persistent"
state to guarantee membership in the identity map.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached and not self._deleted
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return self.session_id is not None and \
self.session_id in sessionlib._sessions
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``, however if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
"""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is :term:`transient` or :term:`pending`
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`.Mapper` used for this mapepd object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
@classmethod
def _detach_states(self, states, session, to_transient=False):
persistent_to_detached = \
session.dispatch.persistent_to_detached or None
deleted_to_detached = \
session.dispatch.deleted_to_detached or None
pending_to_transient = \
session.dispatch.pending_to_transient or None
persistent_to_transient = \
session.dispatch.persistent_to_transient or None
for state in states:
deleted = state._deleted
pending = state.key is None
persistent = not pending and not deleted
state.session_id = None
if to_transient and state.key:
del state.key
if persistent:
if to_transient:
if persistent_to_transient is not None:
obj = state.obj()
if obj is not None:
persistent_to_transient(session, obj)
elif persistent_to_detached is not None:
obj = state.obj()
if obj is not None:
persistent_to_detached(session, obj)
elif deleted and deleted_to_detached is not None:
obj = state.obj()
if obj is not None:
deleted_to_detached(session, obj)
elif pending and pending_to_transient is not None:
obj = state.obj()
if obj is not None:
pending_to_transient(session, obj)
state._strong_obj = None
def _detach(self, session=None):
if session:
InstanceState._detach_states([self], session)
else:
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {'instance': self.obj()}
state_dict.update(
(k, self.__dict__[k]) for k in (
'committed_state', '_pending_mutations', 'modified',
'expired', 'callables', 'key', 'parents', 'load_options',
'class_', 'expired_attributes'
) if k in self.__dict__
)
if self.load_path:
state_dict['load_path'] = self.load_path.serialize()
state_dict['manager'] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict['class_']
self.committed_state = state_dict.get('committed_state', {})
self._pending_mutations = state_dict.get('_pending_mutations', {})
self.parents = state_dict.get('parents', {})
self.modified = state_dict.get('modified', False)
self.expired = state_dict.get('expired', False)
if 'callables' in state_dict:
self.callables = state_dict['callables']
try:
self.expired_attributes = state_dict['expired_attributes']
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
else:
if 'expired_attributes' in state_dict:
self.expired_attributes = state_dict['expired_attributes']
else:
self.expired_attributes = set()
self.__dict__.update([
(k, state_dict[k]) for k in (
'key', 'load_options'
) if k in state_dict
])
if self.key:
try:
self.identity_token = self.key[2]
except IndexError:
# 1.1 and earlier compat before identity_token
assert len(self.key) == 2
self.key = self.key + (None, )
self.identity_token = None
if 'load_path' in state_dict:
self.load_path = PathRegistry.\
deserialize(state_dict['load_path'])
state_dict['manager'](self, inst, state_dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.expired_attributes.discard(key)
if self.callables:
self.callables.pop(key, None)
def _copy_callables(self, from_):
if 'callables' in from_.__dict__:
self.callables = dict(from_.callables)
@classmethod
def _instance_level_callable_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
if 'callables' not in state.__dict__:
state.callables = {}
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
if 'callables' not in state.__dict__:
state.callables = {}
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.committed_state.clear()
self.modified = False
self._strong_obj = None
if '_pending_mutations' in self.__dict__:
del self.__dict__['_pending_mutations']
if 'parents' in self.__dict__:
del self.__dict__['parents']
self.expired_attributes.update(
[impl.key for impl in self.manager._scalar_loader_impls
if impl.expire_missing or impl.key in dict_]
)
if self.callables:
for k in self.expired_attributes.intersection(self.callables):
del self.callables[k]
for k in self.manager._collection_impl_keys.intersection(dict_):
collection = dict_.pop(k)
collection._sa_adapter.invalidated = True
for key in self.manager._all_key_set.intersection(dict_):
del dict_[key]
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names, no_loader=False):
pending = self.__dict__.get('_pending_mutations', None)
callables = self.callables
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
if no_loader and (
impl.callable_ or
key in callables
):
continue
self.expired_attributes.add(key)
if callables and key in callables:
del callables[key]
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def _load_expired(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.\
intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
self.expired_attributes.clear()
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def unloaded_expirable(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.expire_missing)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, is_userland=False):
if attr:
if not attr.send_modified_events:
return
if is_userland and attr.key not in dict_:
raise sa_exc.InvalidRequestError(
"Can't flag attribute '%s' modified; it's not present in "
"the object state" % attr.key)
if attr.key not in self.committed_state or is_userland:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) \
or not self.modified:
self.modified = True
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None and attr:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (
self.manager[attr.key],
base.state_class_str(self)
))
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
self.expired_attributes.difference_update(
set(keys).intersection(dict_))
# the per-keys commit removes object-level callables,
# while that of commit_all does not. it's not clear
# if this behavior has a clear rationale, however tests do
# ensure this is what it does.
if self.callables:
for key in set(self.callables).\
intersection(keys).\
intersection(dict_):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter:
state_dict = state.__dict__
state.committed_state.clear()
if '_pending_mutations' in state_dict:
del state_dict['_pending_mutations']
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_)
@property
def history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key,
PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key,
PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
|
the-stack_106_19617
|
import random as rd
lst=[]
lst2=[]
numberlst=[]
variationArray=[]
weightl=[]
#READING INPUT FROM FILE
with open('F:\\CSE422\\New folder\\genetic.txt') as file:
lst=file.read().split()[1:]
#LIST HANDLING
def listAdjust():
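    # Split the tokens into labels (lst2: 'l' / 'd') and their numeric values (lst).
    # Removing items while iterating appears to rely on labels and numbers
    # strictly alternating in the input file.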
for item in lst:
if item=='l' :
lst2.append('l')
lst.remove(item)
elif item=='d':
lst2.append('d')
lst.remove(item)
iterate=len(lst2)
return iterate
#GENERATE INITIAL POPULATION
def generate_popuation(iterate):
population_size=2**len(lst2)-1
check=1
while check<=population_size:
variation=""
for j in range (iterate):
variant=str(rd.randrange(0,2))
variation+=variant
if variation not in variationArray:
variationArray.append(variation)
check=check+1
variationArray.sort()
variationArray.pop(0)
# print("This is Population Generation")
# print(variationArray)
return population_size
#FITNESS CALCULATION
def FitnessGeneration():
weight=[]
value='0'
for i in variationArray:
weightv=0
iterator=0
for j in i:
if(j=='1') and (lst2[iterator]=='l'):
weightv=weightv-int(lst[iterator])
elif (j=='1') and (lst2[iterator]=='d'):
weightv=weightv+int(lst[iterator])
iterator+=1
weight.append(weightv)
if(weightv==0):
value= i
weightl=weight
#print(weightl)
return value
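#Example (hypothetical input): with lst2 = ['l', 'd'] and lst = ['10', '10'],
#the chromosome '11' has weight -10 + 10 = 0, so it is returned as a
#zero-weight solution; if no chromosome reaches weight 0 the function returns '0'.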
#PARENTSELECTION
def parentSelection():
parent=variationArray
#CROSSOVER
def crossover(iterate,population_size):
j=0
i=0
#print(f'{"Variation len"}{len(variationArray)}')
if(len(variationArray)%2==0):
#print("WOK")
while i <=(population_size-1):
j=i+1
if(j<=population_size-1):
# print(f'{variationArray[i]}{" "}{variationArray[j]}' ,end=" ")
value=rd.randrange(0, iterate)
temp=variationArray[i]
p1=variationArray[i]
p2=variationArray[j]
p1=p1[:value]+p2[value:]
variationArray[i]=p1
p2=p2[:value]+temp[value:]
variationArray[j]=p2
i=j+1
elif(len(variationArray)%2!=0):
#print("WWOK")
while i <=(population_size-1):
j=i+1
if(i==(population_size-1)):
#print("WWOK")
j=i-1
value=rd.randrange(0, iterate)
print(f'{"Randrange "}{value}')
temp=variationArray[i]
p1=variationArray[i]
p2=variationArray[j]
p1=p1[:value]+p2[value:]
variationArray[i]=p1
p2=p2[:value]+temp[value:]
variationArray[j]=p2
break
elif(j<=population_size-1):
# print(f'{variationArray[i]}{" "}{variationArray[j]}' ,end=" ")
value=rd.randrange(0, iterate)
temp=variationArray[i]
p1=variationArray[i]
p2=variationArray[j]
p1=p1[:value]+p2[value:]
variationArray[i]=p1
p2=p2[:value]+temp[value:]
variationArray[j]=p2
i=j+1
#print(variationArray)
#print(f'{"Variation len"}{len(variationArray)}')
variationArray.sort()
if(int(variationArray[0])==0):
variationArray.pop(0)
#print("This is Crossover")
#Mutation
def Mutation(iterate):
bit_flip=rd.randrange(0, iterate)
# print(f'{"bit_flip "}{bit_flip}')
i=0
while(i<len(variationArray)):
l=variationArray[i]
if(l[bit_flip]=='1'):
l=l[:bit_flip]+'0'+l[bit_flip+1:]
variationArray[i]=l
elif(l[bit_flip]=='0'):
l=l[:bit_flip]+'1'+l[bit_flip+1:]
variationArray[i]=l
i=i+1
variationArray.sort()
if(int(variationArray[0])==0):
variationArray.pop(0)
#print("After Mutation")
#print(variationArray)
# x=listAdjust()
# population_size=generate_popuation(x)
# l=FitnessGeneration()
# print(f'{"Hello "}{l}')
# crossover(x,population_size)
# l=FitnessGeneration()
# print(f'{"Hello "}{l}')
# Mutation(x)
# l=FitnessGeneration()
# print(f'{"Hello "}{l}')
#This is Method
def geneticAlgoMain():
parentSelection()
x=listAdjust()
population_size=generate_popuation(x)
l=FitnessGeneration()
cross=0
mute=0
while(True):
if(l=='0' and cross==0):
crossover(x,population_size)
l=FitnessGeneration()
cross=cross+1
if(len(l)>0 and int(l)!=0):
#print("Cross")
print(f'{"Output "}{l}')
break
elif(l=='0' and mute==0):
Mutation(x)
l=FitnessGeneration()
mute=mute+1
if(len(l)>0 and int(l)!=0):
#print("Mute")
print(f'{"Output "}{l}')
break
elif(len(l)>0 and int(l)!=0):
print(f'{"Output "}{l}')
break
elif(mute>0 and cross>0):
print(f'{"Output "}{-1}')
break
geneticAlgoMain()
|
the-stack_106_19619
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.launcher.PiratesDummyLauncher
from otp.launcher.DummyLauncherBase import DummyLauncherBase
from pirates.launcher.PiratesQuickLauncher import PiratesQuickLauncher
class PiratesDummyLauncher(DummyLauncherBase, PiratesQuickLauncher):
__module__ = __name__
def __init__(self):
DummyLauncherBase.__init__(self)
self.setPhaseComplete(1, 100)
self.setPhaseComplete(2, 100)
self.firstPhase = 3
self.startFakeDownload()
|
the-stack_106_19620
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Shunta Saito
# https://github.com/mitmul/chainer-faster-rcnn/
import chainer
import chainer.functions as F
import chainer.links as L
class VGG16(chainer.Chain):
def __init__(self, train=False):
super(VGG16, self).__init__()
self.trunk = [
('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
('relu1_1', F.ReLU()),
('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
('relu1_2', F.ReLU()),
('pool1', F.MaxPooling2D(2, 2)),
('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
('relu2_1', F.ReLU()),
('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
('relu2_2', F.ReLU()),
('pool2', F.MaxPooling2D(2, 2)),
('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
('relu3_1', F.ReLU()),
('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
('relu3_2', F.ReLU()),
('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
('relu3_3', F.ReLU()),
('pool3', F.MaxPooling2D(2, 2)),
('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
('relu4_1', F.ReLU()),
('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
('relu4_2', F.ReLU()),
('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
('relu4_3', F.ReLU()),
('pool4', F.MaxPooling2D(2, 2)),
('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
('relu5_1', F.ReLU()),
('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
('relu5_2', F.ReLU()),
('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
('relu5_3', F.ReLU()),
('rpn_conv_3x3', L.Convolution2D(512, 512, 3, 1, 1)),
('rpn_relu_3x3', F.ReLU()),
]
for name, link in self.trunk:
if 'conv' in name:
self.add_link(name, link)
def __call__(self, x):
for name, f in self.trunk:
x = (getattr(self, name) if 'conv' in name else f)(x)
if 'relu5_3' in name:
self.feature = x
return x
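# Usage sketch (assumed input shape): for a batch of RGB images
#   x = np.zeros((1, 3, 224, 224), dtype=np.float32)
#   y = VGG16()(x)  # 512-channel feature map after rpn_relu_3x3
# the four 2x2 max-poolings give a spatial stride of 16 (224 -> 14).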
|
the-stack_106_19624
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from knack.log import get_logger
from ._constants import get_managed_sku, get_premium_sku
from ._utils import (
get_registry_by_name,
validate_managed_registry,
validate_sku_update,
get_resource_group_name_by_registry_name,
user_confirmation
)
from ._docker_utils import get_login_credentials, EMPTY_GUID
from .network_rule import NETWORK_RULE_NOT_SUPPORTED
logger = get_logger(__name__)
DEF_DIAG_SETTINGS_NAME_TEMPLATE = '{}-diagnostic-settings'
SYSTEM_ASSIGNED_IDENTITY_ALIAS = '[system]'
def acr_check_name(client, registry_name):
return client.check_name_availability(registry_name)
def acr_list(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def acr_create(cmd,
client,
registry_name,
resource_group_name,
sku,
location=None,
admin_enabled=False,
default_action=None,
tags=None,
workspace=None,
identity=None,
key_encryption_key=None):
if default_action and sku not in get_premium_sku(cmd):
raise CLIError(NETWORK_RULE_NOT_SUPPORTED)
if sku not in get_managed_sku(cmd):
raise CLIError("Classic SKU is no longer supported. Please select a managed SKU.")
Registry, Sku, NetworkRuleSet = cmd.get_models('Registry', 'Sku', 'NetworkRuleSet')
registry = Registry(location=location, sku=Sku(name=sku), admin_user_enabled=admin_enabled, tags=tags)
if default_action:
registry.network_rule_set = NetworkRuleSet(default_action=default_action)
if identity or key_encryption_key:
_configure_cmk(cmd, registry, resource_group_name, identity, key_encryption_key)
lro_poller = client.create(resource_group_name, registry_name, registry)
if workspace:
from msrestazure.tools import is_valid_resource_id, resource_id
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import get_subscription_id
acr = LongRunningOperation(cmd.cli_ctx)(lro_poller)
if not is_valid_resource_id(workspace):
workspace = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='microsoft.OperationalInsights',
type='workspaces',
name=workspace)
_create_diagnostic_settings(cmd.cli_ctx, acr, workspace)
return acr
return lro_poller
def acr_delete(cmd, client, registry_name, resource_group_name=None, yes=False):
user_confirmation("Are you sure you want to delete the registry '{}'?".format(registry_name), yes)
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.delete(resource_group_name, registry_name)
def acr_show(cmd, client, registry_name, resource_group_name=None):
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.get(resource_group_name, registry_name)
def acr_update_custom(cmd,
instance,
sku=None,
admin_enabled=None,
default_action=None,
tags=None):
if sku is not None:
Sku = cmd.get_models('Sku')
instance.sku = Sku(name=sku)
if admin_enabled is not None:
instance.admin_user_enabled = admin_enabled
if tags is not None:
instance.tags = tags
if default_action is not None:
NetworkRuleSet = cmd.get_models('NetworkRuleSet')
instance.network_rule_set = NetworkRuleSet(default_action=default_action)
return instance
def acr_update_get(cmd):
"""Returns an empty RegistryUpdateParameters object.
"""
RegistryUpdateParameters = cmd.get_models('RegistryUpdateParameters')
return RegistryUpdateParameters()
def acr_update_set(cmd,
client,
registry_name,
resource_group_name=None,
parameters=None):
registry, resource_group_name = get_registry_by_name(cmd.cli_ctx, registry_name, resource_group_name)
if parameters.network_rule_set and registry.sku.name not in get_premium_sku(cmd):
raise CLIError(NETWORK_RULE_NOT_SUPPORTED)
validate_sku_update(cmd, registry.sku.name, parameters.sku)
return client.update(resource_group_name, registry_name, parameters)
def acr_login(cmd,
registry_name,
resource_group_name=None, # pylint: disable=unused-argument
tenant_suffix=None,
username=None,
password=None,
expose_token=False):
if expose_token:
login_server, _, password = get_login_credentials(
cmd=cmd,
registry_name=registry_name,
tenant_suffix=tenant_suffix,
username=username,
password=password)
logger.warning("You can perform manual login using the provided access token below, "
"for example: 'docker login loginServer -u %s -p accessToken'", EMPTY_GUID)
token_info = {
"loginServer": login_server,
"accessToken": password
}
return token_info
tips = "You may want to use 'az acr login -n {} --expose-token' to get an access token, " \
"which does not require Docker to be installed.".format(registry_name)
from azure.cli.core.util import in_cloud_console
if in_cloud_console():
raise CLIError("This command requires running the docker daemon, "
"which is not supported in Azure Cloud Shell. " + tips)
try:
docker_command, _ = get_docker_command()
except CLIError as e:
logger.warning(tips)
raise e
login_server, username, password = get_login_credentials(
cmd=cmd,
registry_name=registry_name,
tenant_suffix=tenant_suffix,
username=username,
password=password)
# warn casing difference caused by ACR normalizing to lower on login_server
parts = login_server.split('.')
if registry_name != parts[0] and registry_name.lower() == parts[0]:
logger.warning('Uppercase characters are detected in the registry name. When using its server url in '
'docker commands, to avoid authentication errors, use all lowercase.')
from subprocess import PIPE, Popen
p = Popen([docker_command, "login",
"--username", username,
"--password", password,
login_server], stderr=PIPE)
_, stderr = p.communicate()
return_code = p.returncode
if stderr:
if b'error storing credentials' in stderr and b'stub received bad data' in stderr \
and _check_wincred(login_server):
# Retry once after disabling wincred
p = Popen([docker_command, "login",
"--username", username,
"--password", password,
login_server])
p.wait()
else:
stderr_messages = stderr.decode()
# Dismiss the '--password-stdin' warning
if b'--password-stdin' in stderr:
errors = [err for err in stderr_messages.split('\n') if err and '--password-stdin' not in err]
# Will not raise CLIError if there is no error other than '--password-stdin'
if not errors:
return None
stderr_messages = '\n'.join(errors)
logger.warning(stderr_messages)
# Raise error only if docker returns non-zero
if return_code != 0:
raise CLIError('Login failed.')
return None
def acr_show_usage(cmd, client, registry_name, resource_group_name=None):
_, resource_group_name = validate_managed_registry(cmd,
registry_name,
resource_group_name,
"Usage is only supported for managed registries.")
return client.list_usages(resource_group_name, registry_name)
def get_docker_command(is_diagnostics_context=False):
from ._errors import DOCKER_COMMAND_ERROR, DOCKER_DAEMON_ERROR
docker_command = 'docker'
from subprocess import PIPE, Popen, CalledProcessError
try:
p = Popen([docker_command, "ps"], stdout=PIPE, stderr=PIPE)
_, stderr = p.communicate()
except OSError as e:
logger.debug("Could not run '%s' command. Exception: %s", docker_command, str(e))
# The executable may not be discoverable in WSL so retry *.exe once
try:
docker_command = 'docker.exe'
p = Popen([docker_command, "ps"], stdout=PIPE, stderr=PIPE)
_, stderr = p.communicate()
except OSError as inner:
logger.debug("Could not run '%s' command. Exception: %s", docker_command, str(inner))
if is_diagnostics_context:
return None, DOCKER_COMMAND_ERROR
raise CLIError(DOCKER_COMMAND_ERROR.get_error_message())
except CalledProcessError as inner:
logger.debug("Could not run '%s' command. Exception: %s", docker_command, str(inner))
if is_diagnostics_context:
return docker_command, DOCKER_DAEMON_ERROR
raise CLIError(DOCKER_DAEMON_ERROR.get_error_message())
except CalledProcessError as e:
logger.debug("Could not run '%s' command. Exception: %s", docker_command, str(e))
if is_diagnostics_context:
return docker_command, DOCKER_DAEMON_ERROR
raise CLIError(DOCKER_DAEMON_ERROR.get_error_message())
if stderr:
if is_diagnostics_context:
return None, DOCKER_COMMAND_ERROR.set_error_message(stderr.decode())
raise CLIError(DOCKER_COMMAND_ERROR.set_error_message(stderr.decode()).get_error_message())
return docker_command, None
def _check_wincred(login_server):
import platform
if platform.system() == 'Windows':
import json
from os.path import expanduser, isfile, join
docker_directory = join(expanduser('~'), '.docker')
config_path = join(docker_directory, 'config.json')
logger.debug("Docker config file path %s", config_path)
if isfile(config_path):
with open(config_path) as input_file:
content = json.load(input_file)
input_file.close()
wincred = content.pop('credsStore', None)
if wincred and wincred.lower() == 'wincred':
# Ask for confirmation
from knack.prompting import prompt_y_n, NoTTYException
message = "This operation will disable wincred and use file system to store docker credentials." \
" All registries that are currently logged in will be logged out." \
"\nAre you sure you want to continue?"
try:
if prompt_y_n(message):
with open(config_path, 'w') as output_file:
json.dump(content, output_file, indent=4)
output_file.close()
return True
return False
except NoTTYException:
return False
# Don't update config file or retry as this doesn't seem to be a wincred issue
return False
import os
content = {
"auths": {
login_server: {}
}
}
try:
os.makedirs(docker_directory)
except OSError as e:
logger.debug("Could not create docker directory '%s'. Exception: %s", docker_directory, str(e))
with open(config_path, 'w') as output_file:
json.dump(content, output_file, indent=4)
output_file.close()
return True
return False
def _create_diagnostic_settings(cli_ctx, acr, workspace):
from azure.mgmt.monitor import MonitorManagementClient
from azure.mgmt.monitor.models import (DiagnosticSettingsResource, RetentionPolicy,
LogSettings, MetricSettings)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
client = get_mgmt_service_client(cli_ctx, MonitorManagementClient)
def_retention_policy = RetentionPolicy(enabled=True, days=0)
logs = [
LogSettings(enabled=True, category="ContainerRegistryRepositoryEvents", retention_policy=def_retention_policy),
LogSettings(enabled=True, category="ContainerRegistryLoginEvents", retention_policy=def_retention_policy)
]
metrics = [MetricSettings(enabled=True, category="AllMetrics", retention_policy=def_retention_policy)]
parameters = DiagnosticSettingsResource(workspace_id=workspace, metrics=metrics, logs=logs)
client.diagnostic_settings.create_or_update(resource_uri=acr.id, parameters=parameters,
name=DEF_DIAG_SETTINGS_NAME_TEMPLATE.format(acr.name))
def _configure_cmk(cmd, registry, resource_group_name, identity, key_encryption_key):
from azure.cli.core.commands.client_factory import get_subscription_id
if bool(identity) != bool(key_encryption_key):
raise CLIError("Usage error: --identity and --key-encryption-key must be both supplied")
identity = _ensure_identity_resource_id(subscription_id=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
resource=identity)
identity_client_id = _resolve_identity_client_id(cmd.cli_ctx, identity)
KeyVaultProperties, EncryptionProperty = cmd.get_models('KeyVaultProperties', 'EncryptionProperty')
registry.encryption = EncryptionProperty(status='enabled', key_vault_properties=KeyVaultProperties(
key_identifier=key_encryption_key, identity=identity_client_id))
ResourceIdentityType, IdentityProperties = cmd.get_models('ResourceIdentityType', 'IdentityProperties')
registry.identity = IdentityProperties(type=ResourceIdentityType.user_assigned,
user_assigned_identities={identity: {}})
def assign_identity(cmd, client, registry_name, identities, resource_group_name=None):
from azure.cli.core.commands.client_factory import get_subscription_id
assign_system_identity, assign_user_identities = _analyze_identities(identities)
registry, resource_group_name = get_registry_by_name(cmd.cli_ctx, registry_name, resource_group_name)
IdentityProperties, ResourceIdentityType = cmd.get_models('IdentityProperties', 'ResourceIdentityType')
# ensure registry.identity is set and is of type IdentityProperties
registry.identity = registry.identity or IdentityProperties(type=ResourceIdentityType.none)
if assign_system_identity and registry.identity.type != ResourceIdentityType.system_assigned:
registry.identity.type = (ResourceIdentityType.system_assigned
if registry.identity.type == ResourceIdentityType.none
else ResourceIdentityType.system_assigned_user_assigned)
if assign_user_identities and registry.identity.type != ResourceIdentityType.user_assigned:
registry.identity.type = (ResourceIdentityType.user_assigned
if registry.identity.type == ResourceIdentityType.none
else ResourceIdentityType.system_assigned_user_assigned)
if assign_user_identities:
subscription_id = get_subscription_id(cmd.cli_ctx)
registry.identity.user_assigned_identities = registry.identity.user_assigned_identities or {}
for r in assign_user_identities:
r = _ensure_identity_resource_id(subscription_id, resource_group_name, r)
registry.identity.user_assigned_identities[r] = {}
return client.update(resource_group_name, registry_name, registry)
def show_identity(cmd, client, registry_name, resource_group_name=None):
return acr_show(cmd, client, registry_name, resource_group_name).identity
def remove_identity(cmd, client, registry_name, identities, resource_group_name=None):
from azure.cli.core.commands.client_factory import get_subscription_id
remove_system_identity, remove_user_identities = _analyze_identities(identities)
registry, resource_group_name = get_registry_by_name(cmd.cli_ctx, registry_name, resource_group_name)
ResourceIdentityType = cmd.get_models('ResourceIdentityType')
# if registry.identity is not set or is none, return the registry.
if not registry.identity or registry.identity.type == ResourceIdentityType.none:
raise CLIError("The registry {} has no system or user assigned identities.".format(registry_name))
if remove_system_identity:
if registry.identity.type == ResourceIdentityType.user_assigned:
raise CLIError("The registry does not have a system identity assigned.")
registry.identity.type = (ResourceIdentityType.none
if registry.identity.type == ResourceIdentityType.system_assigned
else ResourceIdentityType.user_assigned)
if remove_user_identities:
subscription_id = get_subscription_id(cmd.cli_ctx)
registry.identity.user_assigned_identities = registry.identity.user_assigned_identities or {}
for id_to_remove in remove_user_identities:
original_identity = id_to_remove
was_removed = False
id_to_remove = _ensure_identity_resource_id(subscription_id, resource_group_name, id_to_remove)
# remove identity if it exists even if case is different
for existing_identity in registry.identity.user_assigned_identities.copy():
if existing_identity.lower() == id_to_remove.lower():
registry.identity.user_assigned_identities.pop(existing_identity)
was_removed = True
break
if not was_removed:
raise CLIError("The registry does not have specified user identity '{}' assigned, "
"so it cannot be removed.".format(original_identity))
# all user assigned identities are gone
if not registry.identity.user_assigned_identities:
registry.identity.user_assigned_identities = None # required for put
registry.identity.type = (ResourceIdentityType.none
if registry.identity.type == ResourceIdentityType.user_assigned
else ResourceIdentityType.system_assigned)
# this method should be named create_or_update as it calls the PUT method
return client.create(resource_group_name, registry_name, registry)
def show_encryption(cmd, client, registry_name, resource_group_name=None):
return acr_show(cmd, client, registry_name, resource_group_name).encryption
def rotate_key(cmd, client, registry_name, identity=None, key_encryption_key=None, resource_group_name=None):
registry, resource_group_name = get_registry_by_name(cmd.cli_ctx, registry_name, resource_group_name)
if key_encryption_key:
registry.encryption.key_vault_properties.key_identifier = key_encryption_key
if identity:
try:
import uuid
uuid.UUID(identity)
client_id = identity
except ValueError:
from azure.cli.core.commands.client_factory import get_subscription_id
if identity == SYSTEM_ASSIGNED_IDENTITY_ALIAS:
client_id = 'system' # reserved word on ACR service
else:
identity = _ensure_identity_resource_id(subscription_id=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
resource=identity)
client_id = _resolve_identity_client_id(cmd.cli_ctx, identity)
registry.encryption.key_vault_properties.identity = client_id
return client.update(resource_group_name, registry_name, registry)
def _analyze_identities(identities):
identities = identities or []
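    # For illustration (hypothetical values): ['[system]', '<user-identity-resource-id>']
    # would yield (True, ['<user-identity-resource-id>']) -- a flag for the system-assigned
    # identity plus the remaining user-assigned identity ids.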
return SYSTEM_ASSIGNED_IDENTITY_ALIAS in identities, [x for x in identities if x != SYSTEM_ASSIGNED_IDENTITY_ALIAS]
def _ensure_identity_resource_id(subscription_id, resource_group, resource):
from msrestazure.tools import resource_id, is_valid_resource_id
if is_valid_resource_id(resource):
return resource
return resource_id(subscription=subscription_id,
resource_group=resource_group,
namespace='Microsoft.ManagedIdentity',
type='userAssignedIdentities',
name=resource)
def _resolve_identity_client_id(cli_ctx, managed_identity_resource_id):
from azure.mgmt.msi import ManagedServiceIdentityClient
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from msrestazure.tools import parse_resource_id
res = parse_resource_id(managed_identity_resource_id)
client = get_mgmt_service_client(cli_ctx, ManagedServiceIdentityClient, subscription_id=res['subscription'])
return client.user_assigned_identities.get(res['resource_group'], res['name']).client_id
def list_private_link_resources(cmd, client, registry_name, resource_group_name=None):
resource_group_name = get_resource_group_name_by_registry_name(cmd.cli_ctx, registry_name, resource_group_name)
return client.list_private_link_resources(resource_group_name, registry_name)
|
the-stack_106_19625
|
from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from django_prbac.utils import has_privilege as prbac_has_privilege
from corehq import feature_previews, toggles
from corehq.apps.app_manager.exceptions import AddOnNotFoundException
from corehq.apps.app_manager.models import AdvancedModule, Module, ShadowModule
from corehq.apps.domain.models import Domain
from corehq.privileges import CHILD_CASES, LOOKUP_TABLES
# Similar to feature flags and/or feature previews, but specific to an individual application
# and with the additional notion of a feature being "in use" in a specific module or form
# even if the add-on isn't enabled.
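# For example, the "display_conditions" add-on defined below counts as "in use" in a form
# as soon as form.form_filter is set, even if the add-on was never explicitly enabled.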
class AddOn(object):
def __init__(self, name, description, help_link=None, privilege=None,
used_in_module=None, used_in_form=None, upgrade_text=None):
self.name = name
self.description = description
self.help_link = help_link
self.privilege = privilege
self.upgrade_text = upgrade_text
self.used_in_module = used_in_module if used_in_module else lambda m: False
self.used_in_form = used_in_form if used_in_form else lambda f: False
def has_privilege(self, request):
if not self.privilege:
return True
return prbac_has_privilege(request, self.privilege)
def _uses_case_list_menu_item(module):
if getattr(module, 'case_list', False) and module.case_list.show:
return True
if getattr(module, 'task_list', False) and module.task_list.show:
return True
if getattr(module, 'referral_list', False) and module.referral_list.show:
return True
return False
def _uses_conditional_form_actions(form):
if form.form_type != 'module_form':
# Don't bother restricting non-basic forms
return True
return form.actions.open_case.condition.type == 'if' or form.actions.close_case.condition.type == 'if'
def _uses_detail_format(module, column_format):
details = []
if isinstance(module, Module) or isinstance(module, ShadowModule):
details = [module.case_details, module.ref_details]
elif isinstance(module, AdvancedModule):
details = [module.case_details, module.product_details]
return any([c.format for d in details for c in d.short.columns + d.long.columns if c.format == column_format])
def _uses_calculated_property(module):
def _column_is_calculated_property(col):
return col.useXpathExpression
return hasattr(module, 'case_details') and (
any(_column_is_calculated_property(column) for column in module.case_details.short.columns)
or any(_column_is_calculated_property(column) for column in module.case_details.long.columns)
)
# Apps that were created before add-ons were released get the original set of add-ons enabled
# automatically, so they wouldn't get functionality suddenly turned off during the release.
def _grandfathered(slug, app):
if app.add_ons:
return False
release_date = datetime(2017, 7, 31, 20)
if slug not in [
"conditional_form_actions", "subcases", "case_list_menu_item", "enum_image", "menu_mode",
"register_from_case_list", "display_conditions", "conditional_enum", "calc_xpaths",
"advanced_itemsets", "case_detail_overwrite",
]:
return False
domain_obj = Domain.get_by_name(app.domain)
return (getattr(domain_obj, 'date_created') or datetime(2000, 1, 1)) < release_date
_ADD_ONS = {
"advanced_itemsets": AddOn(
name=feature_previews.VELLUM_ADVANCED_ITEMSETS.label,
description=feature_previews.VELLUM_ADVANCED_ITEMSETS.description,
privilege=LOOKUP_TABLES,
),
"calc_xpaths": AddOn(
name=feature_previews.CALC_XPATHS.label,
description=feature_previews.CALC_XPATHS.description,
used_in_module=_uses_calculated_property,
help_link=feature_previews.CALC_XPATHS.help_link,
),
"case_detail_overwrite": AddOn(
name=_("Case Detail Overwrite"),
description=_("Ability to overwrite one case list or detail's settings with another's. "
"Available in menu settings, in the actions tab."),
),
"case_list_menu_item": AddOn(
name=_("Case List Menu Item"),
description=_("Allows the mobile user to view the case list and case details without actually opening "
"a form. Available in menu settings."),
used_in_module=lambda m: _uses_case_list_menu_item(m),
),
"conditional_enum": AddOn(
name=feature_previews.CONDITIONAL_ENUM.label,
description=feature_previews.CONDITIONAL_ENUM.description,
used_in_module=lambda m: _uses_detail_format(m, 'conditional-enum'),
),
"conditional_form_actions": AddOn(
name=_('Case Conditions'),
description=_("Open or close a case only if a specific question has a particular answer. "
"Available in form settings, under Case Management."),
help_link="https://confluence.dimagi.com/display/commcarepublic/Case+Configuration",
used_in_form=lambda f: _uses_conditional_form_actions(f)
),
"display_conditions": AddOn(
name=_("Display Conditions"),
description=_("Write logic to show or hide forms and menus on the mobile device. "
"Available in form and menu settings."),
help_link="https://confluence.dimagi.com/display/commcarepublic/Application+Design",
used_in_form=lambda f: bool(getattr(f, 'form_filter', False)),
used_in_module=lambda m: bool(m.module_filter),
),
"enum_image": AddOn(
name=feature_previews.ENUM_IMAGE.label,
description=feature_previews.ENUM_IMAGE.description,
help_link=feature_previews.ENUM_IMAGE.help_link,
used_in_module=lambda m: _uses_detail_format(m, 'enum-image'),
),
"menu_mode": AddOn(
name=_("Menu Mode"),
description=_("Control whether a form's enclosing menu is displayed on the mobile device or not. "
"Available in menu settings."),
used_in_module=lambda m: getattr(m, 'put_in_root', False),
),
"register_from_case_list": AddOn(
name=_("Register from case list"),
description=_("Minimize duplicates by making registration forms available directly from the case list "
"on the mobile device. Availabe in menu settings."),
help_link="https://confluence.dimagi.com/pages/viewpage.action?pageId=30605985",
used_in_module=lambda m: m.case_list_form.form_id,
),
"subcases": AddOn(
name=_("Child Cases"),
description=_("Open other types of cases, linking them to the case that "
"created them. Available in form settings."),
help_link="https://confluence.dimagi.com/display/commcarepublic/Child+Cases",
used_in_form=lambda f: f.form_type != "module_form" or bool(f.actions.subcases),
privilege=CHILD_CASES,
upgrade_text=_("Child cases are not available on your subscription. "
"This feature is only available on our Pro plan or higher.")
),
"submenus": AddOn(
name=_("Sub Menus"),
description=_("Nest menus inside of other menus."),
help_link="https://confluence.dimagi.com/display/commcarepublic/Sub+Menus",
used_in_module=lambda m: hasattr(m, 'root_module_id') and m.root_module_id,
),
"empty_case_lists": AddOn(
name=_("New Case Lists Created Empty"),
description=_("When adding a new case list, don't include a registration and followup form."),
),
}
_LAYOUT = [
{
"slug": "case_management",
"collapse": False,
"name": _("Case Management"),
"description": _("Build more complex workflows"),
"slugs": ["conditional_form_actions", "empty_case_lists", "subcases"],
},
{
"slug": "mobile",
"collapse": True,
"name": _("Mobile Experience"),
"description": _("Improve the user experience of your mobile workers"),
"slugs": [
"case_list_menu_item",
"enum_image",
"menu_mode",
"register_from_case_list",
"submenus",
],
},
{
"slug": "xpath",
"collapse": True,
"name": _("Calculations"),
"description": _("Add logic to your app with XPath expressions"),
"slugs": ["display_conditions", "conditional_enum", "calc_xpaths", "advanced_itemsets"],
},
{
"slug": "efficiency",
"collapse": True,
"name": _("App Building Efficiency"),
"description": _("Tools to help build your apps faster"),
"slugs": ["case_detail_overwrite"],
},
]
# Determine whether or not UI should show a feature, based on
# availability and whether or not it's in use.
def show(slug, request, app, module=None, form=None):
if slug not in _ADD_ONS:
raise AddOnNotFoundException(slug)
add_on = _ADD_ONS[slug]
# Do not show if there's a required privilege missing
if not add_on.has_privilege(request) and add_on.upgrade_text is None:
return False
# Show if flag to enable all toggles is on
if toggles.ENABLE_ALL_ADD_ONS.enabled_for_request(request):
return True
if _grandfathered(slug, app):
return True
# Show if add-on has been enabled for app
show = slug in app.add_ons and app.add_ons[slug]
# Show if add-on is also a feature preview this domain has on
# (that has not been turned off for this app specifically)
if slug not in app.add_ons:
previews = feature_previews.previews_dict(app.domain)
if slug in previews:
show = show or previews[slug]
# Show if add-on is being used by the current form/module
if form:
show = show or add_on.used_in_form(form)
elif module:
show = show or add_on.used_in_module(module)
return show
# Get a slug => bool dictionary signifying which add-ons to display in UI
def get_dict(request, app, module=None, form=None):
init_app(request, app)
return {slug: show(slug, request, app, module, form) for slug in _ADD_ONS.keys()}
# Get a slug => bool dictionary signifying which add-ons have privileges
def get_privileges_dict(request):
return {slug: _ADD_ONS[slug].has_privilege(request) for slug in _ADD_ONS.keys()}
# Get add-ons for display in settings UI
def get_layout(request):
all_slugs = set(_ADD_ONS.keys())
layout_slugs = set([slug for section in _LAYOUT for slug in section['slugs']])
if all_slugs != layout_slugs:
difference = ", ".join(all_slugs ^ layout_slugs)
if all_slugs - layout_slugs:
raise AddOnNotFoundException("Add-ons not in layout: {}".format(difference))
raise AddOnNotFoundException("Add-ons in layout do not exist: {}".format(difference))
return [dict({'add_ons': [{
'slug': slug,
'name': _ADD_ONS[slug].name,
'description': _ADD_ONS[slug].description,
'help_link': _ADD_ONS[slug].help_link,
'upgrade_text': _ADD_ONS[slug].upgrade_text,
'show_upgrade': (not _ADD_ONS[slug].has_privilege(request)
and _ADD_ONS[slug].upgrade_text is not None),
} for slug in section['slugs']
if _ADD_ONS[slug].has_privilege(request)
or _ADD_ONS[slug].upgrade_text is not None]}, **section) for section in _LAYOUT]
# Lazily migrate an app that doesn't have any add_ons configured yet.
# Turns on any add-ons that map to feature previews, leaves the rest off.
def init_app(request, app):
if app.add_ons:
return
# Don't use previews_dict because it doesn't include disabled previews
previews = {p.slug: p.enabled(app.domain) for p in feature_previews.all_previews()}
for slug in _ADD_ONS.keys():
add_on = _ADD_ONS[slug]
enable = False
if add_on.has_privilege(request):
# Enable if it's an enabled preview
if slug in previews:
enable = previews[slug]
# Turn on if it's used anywhere
enable = enable or any([add_on.used_in_module(m) for m in app.modules])
enable = enable or any([add_on.used_in_form(f) for m in app.modules for f in m.forms])
enable = enable or _grandfathered(slug, app)
app.add_ons[slug] = enable
|
the-stack_106_19627
|
import matplotlib.pyplot as plt
import numpy as np
def plot_errors(policy_error, value_error):
"""
Method to plot the errors collected for each key in the value and policy errors
:param policy_error: Policy error as an array
:param value_error: Value error as an array
:return: None
"""
def _plot_data(param, value, error_type):
episodes = np.arange(len(value)) + 1
plt.plot(episodes, value)
plt.xlabel("Episodes/Iterations")
plt.ylabel(error_type)
plt.title("{} vs Episodes/Iterations\nfor {} environment, {} algorithm ".format(error_type, *param))
plt.show()
for key in value_error.keys():
value_data = value_error[key]
policy_data = policy_error[key]
_plot_data(key, value_data, "Value error (mse)")
_plot_data(key, policy_data, "Policy error")
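# A minimal usage sketch (not part of the original module): it assumes the error
# dictionaries are keyed by (environment, algorithm) tuples, matching the title
# format string above, and the numbers are purely illustrative.
if __name__ == "__main__":
    demo_key = ("GridWorld", "Value Iteration")
    value_errors = {demo_key: np.linspace(1.0, 0.01, 50)}   # shrinking value error (mse)
    policy_errors = {demo_key: np.linspace(10.0, 0.0, 50)}  # shrinking policy error
    plot_errors(policy_errors, value_errors)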
|
the-stack_106_19628
|
"""
Ideia: Gerar todas as combinações de livros possívei e salvar o preço de cada combinação em uma lista,
aí foi sí ordernar a lista e somar os n primeiros.
complexidade: O(n^5)
Complexidade das funções do python:
.sort() -> O(n*log(n))
.reverse() -> O(n)
list() -> O(n)
map() -> O(n)
"""
def somar(*livros):
soma = 0
for i in livros: soma += i
return soma
p = list(map(int, input().split()[1:]))
m = list(map(int, input().split()[1:]))
q = list(map(int, input().split()[1:]))
f = list(map(int, input().split()[1:]))
b = list(map(int, input().split()[1:]))
n = int(input())
catalogos = []
for jp in p:
for jm in m:
for jq in q:
for jf in f:
for jb in b:
catalogos.append(somar(jp, jm, jq, jf, jb))
catalogos.sort()
catalogos.reverse()
print(somar(*catalogos[:n]))
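# Example stdin (hypothetical values): each of the first five lines starts with a count
# followed by that many prices (the count itself is dropped by the [1:] slice), and the
# last line is n, the number of most expensive bundles to sum:
#   3 10 20 30
#   2 5 15
#   2 7 9
#   1 4
#   2 6 8
#   4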
|
the-stack_106_19629
|
# BY @Deonnn
""" Game of Thrones Dialogues That You Can Use In Everyday Situations
command .gotm
by @Deonnn
"""
import asyncio
import random
from telethon import events
@borg.on(events.NewMessage(pattern=r"\.gotm", outgoing=True))
async def _(event):
if event.fwd_from:
return
await event.edit("Thinking... 🤔")
await asyncio.sleep(2)
    x = random.randrange(1, 31)  # upper bound is exclusive; 1-30 covers every dialogue branch below
if x == 1:
await event.edit(
"[To your teachers on failing you in all your papers confidently, every time...](https://telegra.ph/file/431d178780f9bff353047.jpg)",
link_preview=True,
)
if x == 2:
await event.edit(
"[A shift from the mainstream darling, sweetheart, jaanu, and what not...](https://telegra.ph/file/6bbb86a6c7d2c4a61e102.jpg)",
link_preview=True,
)
if x == 3:
await event.edit(
"[To the guy who's friendzone-ing you...](https://telegra.ph/file/8930b05e9535e9b9b8229.jpg)",
link_preview=True,
)
if x == 4:
await event.edit(
"[When your friend asks for his money back...](https://telegra.ph/file/2df575ab38df5ce9dbf5e.jpg)",
link_preview=True,
)
if x == 5:
await event.edit(
"[A bad-ass reply to who do you think you are?](https://telegra.ph/file/3a35a0c37f4418da9f702.jpg)",
link_preview=True,
)
if x == 6:
await event.edit(
"[When the traffic police stops your car and asks for documents...](https://telegra.ph/file/52612d58d6a61315a4c3a.jpg)",
link_preview=True,
)
if x == 7:
await event.edit(
"[ When your friend asks about the food he/she just cooked and you don't want to break his/her heart...](https://telegra.ph/file/702df36088f5c26fef931.jpg)",
link_preview=True,
)
if x == 8:
await event.edit(
"[When you're out of words...](https://telegra.ph/file/ba748a74bcab4a1135d2a.jpg)",
link_preview=True,
)
if x == 9:
await event.edit(
"[When you realize your wallet is empty...](https://telegra.ph/file/a4508324b496d3d4580df.jpg)",
link_preview=True,
)
if x == 10:
await event.edit(
"[When shit is about to happen...](https://telegra.ph/file/e15d9d64f9f25e8d05f19.jpg)",
link_preview=True,
)
if x == 11:
await event.edit(
"[When that oversmart classmate shouts a wrong answer in class...](https://telegra.ph/file/1a225a2e4b7bfd7f7a809.jpg)",
link_preview=True,
)
if x == 12:
await event.edit(
"[When things go wrong in a big fat Indian wedding...](https://telegra.ph/file/db69e17e85bb444caca32.jpg)",
link_preview=True,
)
if x == 13:
await event.edit(
"[A perfect justification for breaking a promise...](https://telegra.ph/file/0b8fb8fb729d157844ac9.jpg)",
link_preview=True,
)
if x == 14:
await event.edit(
"[When your friend just won't stop LOL-ing on something silly you said...](https://telegra.ph/file/247fa54106c32318797ae.jpg)",
link_preview=True,
)
if x == 15:
await event.edit(
"[When someone makes a joke on you...](https://telegra.ph/file/2ee216651443524eaafcf.jpg)",
link_preview=True,
)
if x == 16:
await event.edit(
"[When your professor insults you in front of the class...](https://telegra.ph/file/a2dc7317627e514a8e180.jpg)",
link_preview=True,
)
if x == 17:
await event.edit(
"[When your job interviewer asks if you're nervous...](https://telegra.ph/file/9cc147d0bf8adbebf164b.jpg)",
link_preview=True,
)
if x == 18:
await event.edit(
"[When you're sick of someone complaining about the heat outside...](https://telegra.ph/file/9248635263c52b968f968.jpg)",
link_preview=True,
)
if x == 19:
await event.edit(
"[When your adda is occupied by outsiders...](https://telegra.ph/file/ef537007ba6d9d4cbd384.jpg)",
link_preview=True,
)
if x == 20:
await event.edit(
"[When you don't have the right words to motivate somebody...](https://telegra.ph/file/2c932d769ae4c5fbed368.jpg)",
link_preview=True,
)
if x == 21:
await event.edit(
"[When the bouncer won't let you and your group of friends in because you're all under-aged...](https://telegra.ph/file/6c8ca79f1e20ebd04391c.jpg)",
link_preview=True,
)
if x == 22:
await event.edit(
"[To the friend who wants you to take the fall for his actions...](https://telegra.ph/file/d4171b9bc9104b5d972d9.jpg)",
link_preview=True,
)
if x == 23:
await event.edit(
"[When that prick of a bully wouldn't take your words seriously...](https://telegra.ph/file/188d73bd24cf866d8d8d0.jpg)",
link_preview=True,
)
if x == 24:
await event.edit(
"[ When you're forced to go shopping/watch a football match with your partner...](https://telegra.ph/file/6e129f138c99c1886cb2b.jpg)",
link_preview=True,
)
if x == 25:
await event.edit(
"[To the large queue behind you after you get the last concert/movie ticket...](https://telegra.ph/file/2423f213dd4e4282a31ea.jpg)",
link_preview=True,
)
if x == 26:
await event.edit(
"[When your parents thought you'd fail but you prove them wrong...](https://telegra.ph/file/39cc5098466f622bf21e3.jpg)",
link_preview=True,
)
if x == 27:
await event.edit(
"[A justification for not voting!](https://telegra.ph/file/87d475a8f9a8350d2450e.jpg)",
link_preview=True,
)
if x == 28:
await event.edit(
"[When your partner expects you to do too many things...](https://telegra.ph/file/68bc768d36e08862bf94e.jpg)",
link_preview=True,
)
if x == 29:
await event.edit(
"[When your friends cancel on the plan you made at the last minute...](https://telegra.ph/file/960b58c8f625b17613307.jpg)",
link_preview=True,
)
if x == 30:
await event.edit(
"[For that friend of yours who does not like loud music and head banging...](https://telegra.ph/file/acbce070d3c52b921b2bd.jpg)",
link_preview=True,
)
|
the-stack_106_19630
|
"""Unit tests for read orientation inference module."""
from pathlib import Path
import pytest
from htsinfer import infer_read_orientation
test_files_dir = Path(__file__).parent.absolute() / "test_files"
file_1 = str(test_files_dir / "first_mate.fastq")
file_2 = str(test_files_dir / "second_mate.fastq")
fasta_human = str(test_files_dir / "transcripts_human.fa")
fasta_mixed = str(test_files_dir / "transcripts_mixed.fa")
fasta_no_orgs = str(test_files_dir / "transcripts_no_orgs.fa")
def _raise(exception) -> None:
"""General purpose exception raiser."""
raise exception
def file_len(fname):
    """Count lines in file."""
    with open(fname) as f:
        return sum(1 for _ in f)
class TestInfer:
"""Tests for the main function `infer()`."""
def test_single_file(self):
"""Function returns without errors."""
assert infer_read_orientation.infer(
fasta=fasta_human,
file_1=file_1,
) == "U"
def test_cannot_create_tmp_dir(self, monkeypatch):
"""Fails to create temporary directory."""
monkeypatch.setattr(
'tempfile.mkdtemp',
lambda *args, **kwargs: _raise(OSError),
)
with pytest.raises(OSError):
infer_read_orientation.infer(
fasta=fasta_human,
file_1=file_1,
)
def test_cannot_delete_tmp_dir(self, monkeypatch, tmp_path):
"""Fails to deleted temporary directory."""
monkeypatch.setattr(
'tempfile.mkdtemp',
lambda *args, **kwargs: str(tmp_path),
)
monkeypatch.setattr(
'shutil.rmtree',
lambda *args, **kwargs: _raise(OSError),
)
with pytest.raises(OSError):
infer_read_orientation.infer(
fasta=fasta_human,
file_1=file_1,
)
class TestSubsetFastaByOrgansim:
"""Test for function `subset_fasta_by_organism()`."""
def test_fasta_subsetting(self, tmp_path):
"""Writes FASTA records of specified organism only."""
fasta_out = str(tmp_path / "out.fa")
infer_read_orientation.subset_fasta_by_organism(
fasta_in=fasta_mixed,
fasta_out=fasta_out,
organism="hsapiens",
)
assert file_len(fasta_out) == file_len(fasta_human)
def test_no_orgs(self, tmp_path):
"""All FASTA records are skipped because organism information is
absent.
"""
fasta_out = str(tmp_path / "out.fa")
infer_read_orientation.subset_fasta_by_organism(
fasta_in=fasta_no_orgs,
fasta_out=str(tmp_path / "out.fa"),
organism="hsapiens",
)
assert file_len(fasta_out) == 0
def test_invalid_fasta(self, tmp_path):
"""No FASTA records are written because input file is not of FASTA
format.
"""
fasta_out = str(tmp_path / "out.fa")
infer_read_orientation.subset_fasta_by_organism(
fasta_in=file_1,
fasta_out=str(tmp_path / "out.fa"),
organism="hsapiens",
)
assert file_len(fasta_out) == 0
def test_fasta_na(self, monkeypatch, tmp_path):
"""Input FASTA file cannot be opened."""
fasta_out = str(tmp_path / "out.fa")
monkeypatch.setattr(
'Bio.SeqIO.parse',
lambda *args, **kwargs: _raise(OSError),
)
with pytest.raises(OSError):
infer_read_orientation.subset_fasta_by_organism(
fasta_in=fasta_human,
fasta_out=fasta_out,
organism="hsapiens",
)
def test_out_path_not_writable(self, monkeypatch, tmp_path):
"""Output FASTA file cannot be written."""
fasta_out = str(tmp_path / "out.fa")
monkeypatch.setattr(
'Bio.SeqIO.write',
lambda *args, **kwargs: _raise(OSError),
)
with pytest.raises(OSError):
infer_read_orientation.subset_fasta_by_organism(
fasta_in=fasta_human,
fasta_out=fasta_out,
organism="hsapiens",
)
|
the-stack_106_19631
|
from deephyper.benchmark import Problem
from candlepb.Combo.models.candle_mlp_9 import create_structure
# We create our Problem object with the Problem class. You don't have to name your Problem object 'Problem'; it can be any name you want. You can also define different problems in the same module.
Problem = Problem()
# You define the create_structure function. This function will return an object following the Structure interface. It can also take keyword arguments such as 'num_cells'.
Problem.add_dim('create_structure', {
'func': create_structure
})
# You define the hyperparameters used to train your generated models during the search.
Problem.add_dim('hyperparameters', {
'num_epochs': 1,
})
Problem.add_dim('load_data', {
'prop': 0.3
})
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == '__main__':
print(Problem)
|
the-stack_106_19635
|
# --- Simple example of Langmuir oscillations in a uniform plasma
from pywarpx import picmi
constants = picmi.constants
##########################
# physics parameters
##########################
plasma_density = 1.e25
plasma_xmin = 0.
plasma_x_velocity = 0.1*constants.c
##########################
# numerics parameters
##########################
# --- Number of time steps
max_steps = 40
diagnostic_interval = 10
# --- Grid
nx = 64
ny = 64
nz = 64
xmin = -20.e-6
ymin = -20.e-6
zmin = -20.e-6
xmax = +20.e-6
ymax = +20.e-6
zmax = +20.e-6
number_per_cell_each_dim = [2,2,2]
##########################
# physics components
##########################
uniform_plasma = picmi.UniformDistribution(density = plasma_density,
                                           upper_bound = [0., None, None],
                                           directed_velocity = [plasma_x_velocity, 0., 0.])
electrons = picmi.Species(particle_type='electron', name='electrons', initial_distribution=uniform_plasma)
##########################
# numerics components
##########################
grid = picmi.Cartesian3DGrid(number_of_cells = [nx, ny, nz],
lower_bound = [xmin, ymin, zmin],
upper_bound = [xmax, ymax, zmax],
lower_boundary_conditions = ['periodic', 'periodic', 'periodic'],
upper_boundary_conditions = ['periodic', 'periodic', 'periodic'],
moving_window_velocity = [0., 0., 0.],
warpx_max_grid_size = 32)
solver = picmi.ElectromagneticSolver(grid=grid, cfl=1.)
##########################
# diagnostics
##########################
field_diag1 = picmi.FieldDiagnostic(name = 'diag1',
grid = grid,
period = diagnostic_interval,
data_list = ['Ex', 'Jx'],
warpx_file_prefix = 'plotfiles/plt')
part_diag1 = picmi.ParticleDiagnostic(name = 'diag1',
period = diagnostic_interval,
species = [electrons],
data_list = ['weighting', 'ux', 'Ex'])
##########################
# simulation setup
##########################
sim = picmi.Simulation(solver = solver,
max_steps = max_steps,
verbose = 1,
warpx_current_deposition_algo = 'direct')
sim.add_species(electrons,
layout = picmi.GriddedLayout(n_macroparticle_per_cell=number_per_cell_each_dim, grid=grid))
sim.add_diagnostic(field_diag1)
sim.add_diagnostic(part_diag1)
##########################
# simulation run
##########################
# write_inputs will create an inputs file that can be used to run
# with the compiled version.
#sim.write_input_file(file_name = 'inputs_from_PICMI')
# Alternatively, sim.step will run WarpX, controlling it from Python
sim.step()
|
the-stack_106_19636
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from modin.data_management.utils import compute_chunksize
from modin.engines.base.io.file_dispatcher import FileDispatcher
class ColumnStoreDispatcher(FileDispatcher):
@classmethod
def call_deploy(cls, fname, col_partitions, **kwargs):
from modin.pandas import DEFAULT_NPARTITIONS
return np.array(
[
cls.deploy(
cls.parse,
DEFAULT_NPARTITIONS + 2,
dict(
fname=fname,
columns=cols,
num_splits=DEFAULT_NPARTITIONS,
**kwargs,
),
)
for cols in col_partitions
]
).T
@classmethod
def build_partition(cls, partition_ids, row_lengths, column_widths):
return np.array(
[
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
)
@classmethod
def build_index(cls, partition_ids):
from modin.pandas import DEFAULT_NPARTITIONS
index_len = cls.materialize(partition_ids[-2][0])
if isinstance(index_len, int):
index = pandas.RangeIndex(index_len)
else:
index = index_len
index_len = len(index)
index_chunksize = compute_chunksize(
pandas.DataFrame(index=index), DEFAULT_NPARTITIONS, axis=0
)
if index_chunksize > index_len:
row_lengths = [index_len] + [0 for _ in range(DEFAULT_NPARTITIONS - 1)]
else:
row_lengths = [
index_chunksize
if i != DEFAULT_NPARTITIONS - 1
else index_len - (index_chunksize * (DEFAULT_NPARTITIONS - 1))
for i in range(DEFAULT_NPARTITIONS)
]
return index, row_lengths
@classmethod
def build_columns(cls, columns):
from modin.pandas import DEFAULT_NPARTITIONS
column_splits = (
len(columns) // DEFAULT_NPARTITIONS
if len(columns) % DEFAULT_NPARTITIONS == 0
else len(columns) // DEFAULT_NPARTITIONS + 1
)
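        # Worked example (illustrative): with 10 columns and DEFAULT_NPARTITIONS = 4,
        # column_splits is 10 // 4 + 1 = 3, so the slices below have widths [3, 3, 3, 1].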
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
column_widths = [len(c) for c in col_partitions]
return col_partitions, column_widths
@classmethod
def build_dtypes(cls, partition_ids, columns):
# Compute dtypes concatenating the results from each of the columns splits
# determined above. This creates a pandas Series that contains a dtype for every
# column.
dtypes = pandas.concat(cls.materialize(list(partition_ids)), axis=0)
dtypes.index = columns
return dtypes
@classmethod
def build_query_compiler(cls, path, columns, **kwargs):
col_partitions, column_widths = cls.build_columns(columns)
partition_ids = cls.call_deploy(path, col_partitions, **kwargs)
index, row_lens = cls.build_index(partition_ids)
remote_parts = cls.build_partition(partition_ids[:-2], row_lens, column_widths)
dtypes = cls.build_dtypes(partition_ids[-1], columns)
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(
remote_parts,
index,
columns,
row_lens,
column_widths,
dtypes=dtypes,
)
)
return new_query_compiler
|
the-stack_106_19639
|
import json
import sys
from os import path
import vk
from PyQt5 import QtGui
from PyQt5.QtCore import QRegExp, QObject, pyqtSignal, QEventLoop
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUi
import google_export
import vktests
class Window(QMainWindow):
def __init__(self, parent=None):
super().__init__()
QMainWindow.__init__(self)
loadUi('MainWindow.ui', self)
self.GauthIco.setPixmap(QtGui.QPixmap("ico/question.png"))
self.vkdialog = VKauthWindow()
self.gsheetdialog = GSheets()
self.VKauthExists = False
self.vkauth = {}
self.client = []
self.targetQ = ""
self.data = []
self.nums = {}
# Centering window
# From https://pythonprogramminglanguage.com/pyqt5-center-window/
qtRectangle = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
qtRectangle.moveCenter(centerPoint)
self.move(qtRectangle.topLeft())
self._stdout = StdoutRedirect()
self._stdout.start()
self._stdout.printOccur.connect(lambda x: self._append_text(x))
def SaveBtn_clicked(self):
if self.txtCheck.checkState(): vktests.save_to_txt(self.data)
if self.csvCheck.checkState(): vktests.save_to_csv(self.data)
if self.googleCheck.checkState(): self.gsheetdialog.show()
def BtnsDisable(self):
self.startBtn.setEnabled(False)
self.saveBtn.setEnabled(False)
if len(self.targetQLine.text()) > 0:
self.targetQ = self.targetQLine.text()
self.startBtn.setEnabled(True)
if self.txtCheck.checkState() or self.csvCheck.checkState() or self.googleCheck.checkState():
self.saveBtn.setEnabled(True)
def StartBtn_clicked(self):
self.consoleOut.setEnabled(True)
self.data = []
self.data = vktests.all_for_msgs(self.vkapi, self.data, self.targetQ, self.vkauth["ADMIN_ID"])
if len(self.data) == 0:
print("По запросу не найдено результатов")
self.outputBox.setEnabled(False)
else:
self.outputBox.setEnabled(True)
def GSheetsExport(self, sheet_name, wsheet_name):
google_export.main_export(self.client, self.data, sheet_name, wsheet_name)
def VKauthExists_get(self):
return self.VKauthExists
def vkauth_get(self):
return self.vkauth
def VKauthBtn_clicked(self):
self.vkdialog.show()
self.vkdialog.load()
def GauthBtn_clicked(self):
temp = QFileDialog.getOpenFileName(self, None, "", "JSON (*.json)")
GauthFile = path.basename(temp[0])
if GauthFile:
print(f"Выбран {GauthFile}")
self.GauthTest(GauthFile)
def GauthTest(self, GauthFile):
self.client = google_export.login(GauthFile)
if self.client == -1:
self.GauthLabel.setText("Файл аутентификации Google не принят")
self.GauthIco.setPixmap(QtGui.QPixmap("ico/question.png"))
self.googleCheck.setEnabled(False)
ErrorMsg(2)
else:
self.GauthLabel.setText("Файл аутентификации Google принят")
self.GauthIco.setPixmap(QtGui.QPixmap("ico/tick.png"))
self.googleCheck.setEnabled(True)
def AuthTest(self):
try:
with open("vk-auth.json") as f:
self.vkauth = json.load(f)
self.VKauthExists = True
self.vkapi = vktests.login(self.vkauth["TOKEN"])
self.nums = vktests.starting_info(self.vkapi, self.vkauth["ADMIN_ID"])
if self.nums['num'] == 0: raise WrongAdmIDEx
self.auth_UI([0])
except FileNotFoundError:
self.VKauthExists = False
self.auth_UI([1, 0])
except vk.exceptions.VkAPIError:
self.auth_UI([1, 1])
ErrorMsg(0)
except WrongAdmIDEx:
self.auth_UI([1, 1])
ErrorMsg(1)
def auth_UI(self, mode):
if mode[0] == 0:
self.msgsNumLabel.setText(f"Всего сообщений в диалоге — {self.nums['num']}")
self.VKauthLabel.setText("Файл аутентификации VK принят")
self.VKauthIco.setPixmap(QtGui.QPixmap("ico/tick.png"))
self.VKauthBtn.setText("Изменить?")
self.groupLabel.setText(f"Целевое сообщество — {self.nums['grpname']}")
self.adminLabel.setText(f"Выбранный администратор — {self.nums['admname']}")
self.functionBox.setEnabled(True)
elif mode[0] == 1:
self.VKauthIco.setPixmap(QtGui.QPixmap("ico/cross.png"))
self.groupLabel.setText("Целевое сообщество — ?")
self.adminLabel.setText("Выбранный администратор — ?")
self.msgsNumLabel.setText("Всего сообщений в диалоге — ?")
if mode[1] == 0:
self.VKauthLabel.setText("Файл аутентификации VK не найден")
self.VKauthBtn.setText("Создать")
elif mode[1] == 1:
self.VKauthLabel.setText("Файл аутентификации VK не принят")
self.VKauthBtn.setText("Изменить")
def _append_text(self, msg):
self.consoleOut.moveCursor(QtGui.QTextCursor.End)
self.consoleOut.insertPlainText(msg)
# refresh textedit show, refer) https://doc.qt.io/qt-5/qeventloop.html#ProcessEventsFlag-enum
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)
# Redirecting stdout/stderr output to QEditLine
# From https://4uwingnet.tistory.com/9
class StdoutRedirect(QObject):
printOccur = pyqtSignal(str, str, name="print")
def __init__(self, *param):
QObject.__init__(self, None)
self.daemon = True
self.sysstdout = sys.stdout.write
self.sysstderr = sys.stderr.write
def stop(self):
sys.stdout.write = self.sysstdout
sys.stderr.write = self.sysstderr
def start(self):
sys.stdout.write = self.write
sys.stderr.write = lambda msg: self.write(msg, color="red")
def write(self, s, color="black"):
sys.stdout.flush()
self.printOccur.emit(s, color)
class GSheets(QDialog):
def __init__(self, parent=None):
super().__init__()
QDialog.__init__(self)
loadUi('GSheets.ui', self)
def OKDisable(self):
self.OKBtn.setEnabled(False)
if len(self.sheet_name.text()) > 0 and len(self.wsheet_name.text()) > 0:
self.OKBtn.setEnabled(True)
def accept(self):
Window.GSheetsExport(myWindow, self.sheet_name.text(), self.wsheet_name.text())
self.close()
# Dialog for vk-auth.json file generating
class VKauthWindow(QDialog):
def __init__(self, parent=None):
super().__init__()
QDialog.__init__(self)
loadUi('VKauthWin.ui', self)
self.tokenLine.setValidator(QRegExpValidator(QRegExp('([a-z0-9])*')))
self.adminLine.setValidator(QIntValidator())
def load(self):
VKauthExists = Window.VKauthExists_get(myWindow)
if not VKauthExists:
self.OKBtn.setEnabled(False)
else:
self.tokenLine.setText(Window.vkauth_get(myWindow)["TOKEN"])
self.adminLine.setText(Window.vkauth_get(myWindow)["ADMIN_ID"])
def help(self):
pixmap = QPixmap('help.jpg')
helpMsg = QMessageBox()
helpMsg.setWindowTitle("Помощь по аутентификации")
helpMsg.setIconPixmap(pixmap)
helpMsg.resize(pixmap.height(), pixmap.width())
helpMsg.exec_()
def OKDisable(self):
self.OKBtn.setEnabled(False)
if len(self.tokenLine.text()) > 0 and len(self.adminLine.text()) > 0:
self.OKBtn.setEnabled(True)
def accept(self):
auth = {"TOKEN": f"{self.tokenLine.text()}", "ADMIN_ID": f"{self.adminLine.text()}"}
with open('vk-auth.json', 'w') as outfile:
json.dump(auth, outfile, indent=4)
Window.AuthTest(myWindow)
self.close()
class WrongAdmIDEx(Exception):
""""Raised when number of messages in dialogue equals 0 | Dialogue not exists"""
pass
def ErrorMsg(ErrNum):
msg = QMessageBox()
msg.setWindowTitle("Ошибка")
msg.setIcon(QMessageBox.Warning)
if ErrNum == 0:
msg.setText("Упс, токен аутентификации VK недействителен.")
elif ErrNum == 1:
msg.setText("Упс, введен неправильный ID администратора или диалог пуст.")
elif ErrNum == 2:
msg.setText("Упс, проверьте ваш файл аутентификации.")
msg.exec_()
if __name__ == '__main__':
app = QApplication(sys.argv)
myWindow = Window(None)
myWindow.show()
myWindow.AuthTest()
app.exec_()
|
the-stack_106_19640
|
for _ in range(int(input())):
N,A,B = map(int, input().split())
s=input()
time=0
for i in s:
if(i=="1"):
time+=B
if(i=="0"):
time+=A
print(time)
|
the-stack_106_19643
|
import os
import sys
import numpy as np
import pandas as pd
import logging
import gc
import tqdm
import pickle
import json
import time
import tempfile
from gensim.models import Word2Vec
from sklearn.metrics import accuracy_score, roc_auc_score
import torch
from torch import nn
import torch.nn.functional as F
from data_loader import train_data_loader, test_data_loader
from transformer_encoder_classifier import Transformer_Encoder_Classifier
cwd = os.getcwd()
train_path = os.path.join(cwd, 'train_artifact')
test_path = os.path.join(cwd, 'test_artifact')
input_path = os.path.join(cwd, 'input_artifact')
input_split_path = os.path.join(cwd, 'input_split_artifact')
embed_path = os.path.join(cwd, 'embed_artifact')
model_path = os.path.join(cwd, 'model_artifact')
registry_path = os.path.join(embed_path, 'w2v_registry.json')
with open(registry_path, 'r') as f:
w2v_registry = json.load(f)
def initiate_logger(log_path):
"""
Initialize a logger with file handler and stream handler
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-s: %(message)s', datefmt='%H:%M:%S')
fh = logging.FileHandler(log_path)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.info('===================================')
logger.info('Begin executing at {}'.format(time.ctime()))
logger.info('===================================')
return logger
def get_torch_module_num_of_parameter(model):
"""
Get # of parameters in a torch module.
"""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
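    # e.g. (hypothetical): for nn.Linear(10, 2) this counts a 10x2 weight matrix plus a
    # bias of length 2, i.e. 22 trainable parameters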
return params
def train(model, train_inp_tuple, validation_inp_tuple, checkpoint_dir, checkpoint_prefix, device, epoches=5, batch_size=1024, logger=None, epoch_start=0, max_seq_len=100, lr=1e-3):
"""
: model (torch.nn.module): model to be trained
: train_inp_tuple (list[tuple(str, list[str], list[str])]): list of input for train_data_loader
: str: path to label data
: list[str]: list of embedding variables
: list[str]: list of paths to a pkl file
: validation_inp_tuple (list[tuple(str, list[str], list[str])]): list of input for train_data_loader
: str: path to label data
: list[str]: list of embedding variables
: list[str]: list of paths to a pkl file
: checkpoint_dir (str): path to checkpoint directory
: checkpoint_prefix (str): prefix of checkpoint file
: device (str): device to train the model
: epoches (int): number of epoches to train
: batch_size (int): size of mini batch
: epoch_start (int): if = 0 then train a new model, else load an existing model and continue to train, default 0
: max_seq_len (int): max length for sequence input, default 100
: lr (float): learning rate for Adam, default 1e-3
"""
global w2v_registry, model_path
gc.enable()
# Check checkpoint directory
if not os.path.isdir(checkpoint_dir): os.mkdir(checkpoint_dir)
# Load model if not train from scratch
if epoch_start != 0:
model_artifact_path = os.path.join(checkpoint_dir, '{}_{}.pth'.format(checkpoint_prefix, epoch_start))
model.load_state_dict(torch.load(model_artifact_path))
if logger: logger.info('Start retraining from epoch {}'.format(epoch_start))
print("#####model_artifact_path=", model_artifact_path)
# Set up loss function and optimizer
model.to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=True)
div, mod = divmod(810000, batch_size)
n_batch_estimate = div + min(mod, 1)
# Main Loop
for epoch in range(1+epoch_start, epoches+1+epoch_start):
if logger:
logger.info('=========================')
logger.info('Processing Epoch {}/{}'.format(epoch, epoches+epoch_start))
logger.info('=========================')
# Train model
model.train()
train_running_loss, train_n_batch = 0, 0
for index, (label_artifact_path, seq_inp_target, seq_inp_path) in enumerate(train_inp_tuple, start=1):
print("#####epoch=", epoch, "index=", index, "label_artifac_path=", label_artifact_path,
"\nseq_inp_target=", seq_inp_target, "seq_inp_path=", seq_inp_path)
train_loader = train_data_loader(label_artifact_path, seq_inp_target, seq_inp_path, w2v_registry, batch_size=batch_size, max_seq_len=max_seq_len)
train_iterator = iter(train_loader)
while True:
try:
y, x_seq, x_last_idx = next(train_iterator) # 6 inputs, 6 sequences: [6, batch_size, max_seq_len, embed_size]
y = torch.from_numpy(y).long().to(device)
x = []
for s in x_seq:
x.append(s.to(device))
x.append(x_last_idx) # append the sequence lengths - 1 to x
optimizer.zero_grad()
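# Note: nn.CrossEntropyLoss applies log-softmax internally, so taking the softmax
# of the model output below means the loss effectively sees softmax applied twice.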
yp = F.softmax(model(*x), dim=1)
loss = loss_fn(yp, y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=100)
optimizer.step()
train_running_loss += loss.item()
train_n_batch += 1
if train_n_batch%4000==0 and logger:
logger.info('Epoch {}/{} - Batch {}/{} Done - Train Loss: {:.6f}'.format(epoch, epoches+epoch_start, train_n_batch, n_batch_estimate, train_running_loss/train_n_batch))
del x, y, yp, x_seq, x_last_idx
_ = gc.collect()
torch.cuda.empty_cache()
except StopIteration:
break
del train_loader, train_iterator
_ = gc.collect()
torch.cuda.empty_cache()
if logger:
logger.info('Epoch {}/{} - Batch {}/{} Done - Train Loss: {:.6f}'.format(epoch, epoches+epoch_start, train_n_batch, n_batch_estimate, train_running_loss/train_n_batch))
# Evaluate model
model.eval()
test_running_loss, test_n_batch = 0, 0
true_y, pred_y = [], []
for index, (label_artifact_path, seq_inp_target, seq_inp_path) in enumerate(validation_inp_tuple, start=1):
train_loader = train_data_loader(label_artifact_path, seq_inp_target, seq_inp_path, w2v_registry, batch_size=batch_size, max_seq_len=max_seq_len)
train_iterator = iter(train_loader)
while True:
try:
y, x_seq, x_last_idx = next(train_iterator)
y = torch.from_numpy(y).long().to(device)
x = []
for s in x_seq:
x.append(s.to(device))
x.append(x_last_idx)
yp = F.softmax(model(*x), dim=1)
loss = loss_fn(yp, y)
pred_y.extend(list(yp.cpu().detach().numpy()))
true_y.extend(list(y.cpu().detach().numpy()))
test_running_loss += loss.item()
test_n_batch += 1
del x, y, yp, x_seq, x_last_idx
_ = gc.collect()
torch.cuda.empty_cache()
except StopIteration:
break
del train_loader, train_iterator
_ = gc.collect()
torch.cuda.empty_cache()
pred = np.argmax(np.array(pred_y), 1)
true = np.array(true_y).reshape((-1,))
acc_score = accuracy_score(true, pred)
del pred, true, pred_y, true_y
_ = gc.collect()
torch.cuda.empty_cache()
if logger:
logger.info('Epoch {}/{} Done - Test Loss: {:.6f}, Test Accuracy: {:.6f}'.format(epoch, epoches+epoch_start, test_running_loss/test_n_batch, acc_score))
# Save model state dict
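# (files are named '<checkpoint_prefix>_<epoch>.pth', the same pattern that is
# loaded above when resuming from a non-zero epoch_start)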
ck_file_name = '{}_{}.pth'.format(checkpoint_prefix, epoch)
ck_file_path = os.path.join(checkpoint_dir, ck_file_name)
torch.save(model.state_dict(), ck_file_path)
if __name__=='__main__':
assert len(sys.argv)>=6
epoch_start = int(sys.argv[1])
epoches = int(sys.argv[2])
batch_size = int(sys.argv[3])
max_seq_len = int(sys.argv[4])
lr = float(sys.argv[5])
print("get params=", sys.argv)
if len(sys.argv)>6:
train_inp_tuple = [(os.path.join(input_split_path, 'train_age_{}.npy'.format(i)), ['creative'],
[os.path.join(input_split_path, 'train_creative_id_seq_{}.pkl'.format(i))]) for i in range(1,10)]
validation_inp_tuple = [(os.path.join(input_split_path, 'train_age_{}.npy'.format(i)), ['creative'],
[os.path.join(input_split_path, 'train_creative_id_seq_{}.pkl'.format(i))]) for i in range(10,11)]
checkpoint_dir = os.path.join(model_path, 'Transformer_Encoder_Classifier_Creative_Age')
checkpoint_prefix = 'Transformer_Encoder_Classifier_Creative_Age'
else:
embs = ['product'] # ,,'product', 'advertiser', 'ad' 'creative'
train_inp_tuple = [(os.path.join(input_path, 'train_age_tra.npy'), embs,
[os.path.join(input_path, 'train_' + emb +'_id_seq_tra.pkl') for emb in embs])]
validation_inp_tuple = [(os.path.join(input_path, 'train_age_val.npy'), embs,
[os.path.join(input_path, 'train_' + emb +'_id_seq_val.pkl') for emb in embs])]
checkpoint_dir = os.path.join(model_path, 'Transformer_Encoder_Classifier_Creative_Age')
checkpoint_prefix = 'Transformer_Encoder_Classifier_Creative_Age'
print("get train_inp_tuple=", train_inp_tuple, "\n\nvalidation_inp_tupe=", validation_inp_tuple, "\n\ncheckpoint_dir=", checkpoint_dir,
"\n\ncheckpoint_prefix=", checkpoint_prefix)
logger = initiate_logger('Transformer_Encoder_Classifier_Creative_Age.log')
logger.info('Epoch Start: {}, Epoch to Train: {}, Batch Size: {}, Max Sequence Length: {}, Learning Rate: {}'.format(epoch_start, epoches, batch_size, max_seq_len, lr))
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Device in Use: {}'.format(DEVICE))
if torch.cuda.is_available():
torch.cuda.empty_cache()
t = torch.cuda.get_device_properties(DEVICE).total_memory/1024**3
c = torch.cuda.memory_cached(DEVICE)/1024**3
a = torch.cuda.memory_allocated(DEVICE)/1024**3
logger.info('CUDA Memory: Total {:.2f} GB, Cached {:.2f} GB, Allocated {:.2f} GB'.format(t,c,a))
#embed_size, out_size, n_enc_layer, n_head, intermediate_size, device, transformer_dropout=0.1, mlp_dropout=0.4,
model = Transformer_Encoder_Classifier(embed_size=128, out_size=10, n_enc_layer=1, n_head=8, intermediate_size=1024,
device=DEVICE).to(DEVICE)
logger.info('Model Parameter #: {}'.format(get_torch_module_num_of_parameter(model)))
train(model, train_inp_tuple, validation_inp_tuple, checkpoint_dir, checkpoint_prefix, DEVICE,
epoches=epoches, batch_size=batch_size, logger=logger, epoch_start=epoch_start, max_seq_len=max_seq_len, lr=lr)
|
the-stack_106_19644
|
#!/usr/bin/python3
import numpy as np
import helper.basis
import helper.function
class FinanceInterpolant(helper.function.Interpolant):
def __init__(
self, basis, X, L, I, fX, aX=None, bounds=None,
boundsTransformed=None, gridType=None, p=None, struct=None,
info=None):
super().__init__(basis, X, L, I, fX, aX=aX)
self.bounds = bounds
self.boundsTransformed = boundsTransformed
self.gridType = gridType
self.p = p
self.struct = struct
self.info = info
@staticmethod
def fromStruct(struct, info=None):
X = struct["gridPoints"]
gridString = struct["gridString"][0]
L, I = FinanceInterpolant.parseGridString(gridString)
d = X.shape[1]
bounds = np.row_stack((struct["lowerBounds"], struct["upperBounds"]))
boundsTransformed = np.row_stack((struct["lbt"], struct["ubt"]))
fX = struct["values"]
aX = struct["surpluses"]
gridType = struct["gridType"][0]
p = struct["degree"]
struct = struct
domainTrafo = struct["DomainTrafo"]
valueTrafo = struct["ValueTrafo"]
p = (p[0,0] if len(p) > 0 else 1)
if gridType == "lagrange-notaknot-spline-boundary":
basis1D = helper.basis.HierarchicalWeaklyFundamentalSpline(p)
elif gridType == "linear-boundary":
assert p == 1
basis1D = helper.basis.HierarchicalBSpline(p)
else:
raise ValueError("Unknown grid type.")
basis = helper.basis.TensorProduct(basis1D, d)
assert domainTrafo.size == 0
assert valueTrafo.size == 0
interpolant = FinanceInterpolant(
basis, X, L, I, fX, aX=aX, bounds=bounds,
boundsTransformed=boundsTransformed, gridType=gridType, p=p,
struct=struct, info=info)
return interpolant
@staticmethod
def parseGridString(gridString):
lines = [x.strip() for x in gridString.splitlines()]
gridType = lines[0]
del lines[0]
version, d, N = [int(x) for x in lines[0].split()]
del lines[0]
stretchingMode = int(lines[0])
assert stretchingMode == 0
del lines[0]
boundingBox = lines[0]
assert boundingBox == " ".join(d * ["0.000000e+00 1.000000e+00 0 0"])
del lines[0]
L, I = [], []
for k in range(N):
d2 = int(lines[0])
assert d == d2
li = [int(x) for x in lines[1].split()]
L.append(li[0::2])
I.append(li[1::2])
leaf = bool(int(lines[2]))
del lines[:3]
L, I = np.array(L), np.array(I)
return L, I
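# The serialized grid string parsed above is expected to look roughly like this
# (illustrative example with d=1 and N=1):
#   linear-boundary
#   0 1 1
#   0
#   0.000000e+00 1.000000e+00 0 0
#   1
#   1 1
#   1
# i.e. grid type, "version d N", stretching mode, bounding box, then for each grid
# point: its dimension, interleaved level/index pairs, and a leaf flag.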
def _evaluateBasis(self, k, XX):
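# Rescale the evaluation points from the original domain [lower, upper] to the
# unit hyper-cube expected by the underlying basis.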
XXunitCube = (XX - self.bounds[0]) / (self.bounds[1] - self.bounds[0])
return super()._evaluateBasis(k, XXunitCube)
def createJInterpolant(solution, t, discreteStateName, name="interpOptJ"):
if t < 0: t += solution.size
return FinanceInterpolant.fromStruct(
solution[0,t][name][discreteStateName][0,0][0,0],
info={"name" : name, "t" : t, "T" : solution.size,
"discreteStateName" : discreteStateName})
def createGradJInterpolant(
solution, t, discreteStateName, gradientStateName, name="interpGradJ"):
if t < 0: t += solution.size
return FinanceInterpolant.fromStruct(
solution[0,t][name][discreteStateName][0,0][gradientStateName][0,0][0,0],
info={"name" : name, "t" : t, "T" : solution.size,
"discreteStateName" : discreteStateName,
"gradientStateName" : gradientStateName})
def createPolicyInterpolant(interpPolicy, t, discreteStateName, policyName):
if t < 0: t += interpPolicy.size
return FinanceInterpolant.fromStruct(
interpPolicy[0,t][discreteStateName][0,0][policyName][0,0],
info={"name" : "interpPolicy", "t" : t, "T" : interpPolicy.size,
"discreteStateName" : discreteStateName,
"policyName" : policyName})
|
the-stack_106_19645
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from ibm_boto3.session import Session
from ibm_boto3.resources.collection import ResourceCollection
class TestCollection(unittest.TestCase):
def setUp(self):
self.session = Session(
aws_access_key_id='dummy', aws_secret_access_key='dummy',
region_name='us-east-1')
# Pick an arbitrary resource.
self.s3_resource = self.session.resource('s3')
def test_can_use_collection_methods(self):
self.assertIsInstance(
self.s3_resource.instances.all(), ResourceCollection)
def test_can_chain_methods(self):
self.assertIsInstance(
self.s3_resource.instances.all().page_size(5), ResourceCollection)
|
the-stack_106_19646
|
# pybatch
# github.com/sbritorodr/pybatch
# This program executes any instruction for all files inside a folder
# (e.g. convert all these mp4 files into mkv using ffmpeg)
# the idea is to take, for example:
# ffmpeg -i [i] [o]
# substitute the input for each instance and generate an output with the same
# name inside an ./output folder
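# For example (illustrative paths): with the command "ffmpeg -i [i] [o].mkv" and a
# file "clip.mp4" inside the chosen folder, the loop at the bottom ends up running:
#   ffmpeg -i /path/to/folder/clip.mp4 /path/to/script/output/clip.mkv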
# Title screen
print('''
______ _ _
| ___ \ | | | |
_ __ _ _| |_/ / __ _| |_ ___| |__
| '_ \| | | | ___ \/ _` | __/ __| '_ \
| |_) | |_| | |_/ / (_| | || (__| | | |
| .__/ \__, \____/ \__,_|\__\___|_| |_|
| | __/ |
|_| |___/
Make orders automatically
github.com/sbritorodr/pybatch
''')
import os
# help function
def help():
print(
'''
This program needs a command, an input [i] and output [o]
For example, in order to convert some files with ffmpeg you
need to write this:
ffmpeg -i [i] [o].mkv
''')
# get working directory
pwd = os.path.dirname(os.path.abspath(__file__))
# get folder:
input_folder = str(input("Add your folder path or leave it blank to select \"./\": \n")) or "./"
print("You selected this folder: " + input_folder)
directory = pwd
if input_folder.startswith('.'):
input_folder = input_folder.replace('.', '')
directory += input_folder
elif input_folder.startswith('/'):
directory = input_folder
print(directory)
# select file type:
file_type = str(input("select file type (.txt, .pdf, .rar...)")) or ""
print("you selected \"" + file_type + "\"")
# delete all whitespaces
def remove_whitespaces(parent):
for path, folders, files in os.walk(parent):
for f in files:
os.rename(os.path.join(path, f), os.path.join(path, f.replace(' ', '_')))
for i in range(len(folders)):
new_name = folders[i].replace(' ', '_')
os.rename(os.path.join(path, folders[i]), os.path.join(path, new_name))
folders[i] = new_name
remove_whitespaces(directory)
# Loop to get all files
list_files = []
for file in os.listdir(directory):
if file.endswith(file_type):
list_files.append(file)
print(list_files)
# check if you mess up something
if list_files == []:
raise Exception("No files are selected. Check if you have some files or you input folder exists")
# command string. It substitutes the [i] selection and the [o] with the algorithm
command = str(input("Add your command here. Write h if you need help:\n" )) or "ffmpeg -i [i] [o].mp3"
command_def = command # save the variable anywhere for the "main" loop
if command == 'h':
help()
elif "[i]" not in command:
raise Exception("you didn't add [i] or [o].")
if '[o]' in command:
try: os.mkdir("output")
except: 0
# Command loop with [i] and [o] replacement
for file in list_files:
command = command_def
output_file = file.split('.', 1)[0] # remove the file extension, to avoid '.mp4.mp3' gibberish
command = command.replace('[i]', directory + file)
if "[o]" not in command:
command = command.replace('[o]', "")
else:
command = command.replace('[o]', pwd + "/output/" + output_file)
print(command)
os.system(command)
# End message
print("\n \n Script finished with no errors. If something has gone wrong, check if you write the command correctly")
|
the-stack_106_19647
|
# -*- coding: utf-8 -*-
"""Windows Registry custom event formatter helpers."""
from plaso.formatters import interface
from plaso.formatters import manager
class WindowsRegistryValuesFormatterHelper(
interface.CustomEventFormatterHelper):
"""Windows Registry values formatter helper."""
IDENTIFIER = 'windows_registry_values'
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
values = event_values.get('values', None)
if not values:
event_values['values'] = '(empty)'
manager.FormattersManager.RegisterEventFormatterHelper(
WindowsRegistryValuesFormatterHelper)
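# Minimal usage sketch (assumes the helper is invoked by plaso's formatting
# pipeline with a dict of event values):
#   helper = WindowsRegistryValuesFormatterHelper()
#   event_values = {'values': ''}
#   helper.FormatEventValues(event_values)
#   # event_values['values'] is now '(empty)'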
|