content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
"""Generates the pins file for the SWM320"""
from __future__ import print_function
import re
import sys
import argparse
class Pin:
"""Holds the information associated with a pin."""
def __init__(self, name, port, pbit, preg, IRQn):
self.name = name
self.port = port
self.pbit = pbit
self.preg = preg
self.IRQn = IRQn
def print(self):
print('pin_obj_t pin_{:4s} = PIN({:4s}, {:5s}, {:5s}, {:s}, {:10s});\n'.format(self.name, self.name, self.port, self.pbit, self.preg, self.IRQn))
def print_header(self, hdr_file):
hdr_file.write('extern pin_obj_t pin_{:4s};\n'.format(self.name))
class Pins:
def __init__(self):
self.pins = [] # list of Pin
def find_pin(self, port, pbit):
for pin in self.pins:
if pin.port == port and pin.pbit == pbit:
return pin
def find_pin_by_name(self, name):
for pin in self.pins:
if pin.name == name:
return pin
def parse_af_file(self, filename):
with open(filename, 'r') as f:
for line in f.readlines():
match = re.match(r'#define PORT([ABCD])_PIN(\d+)_GPIO', line)
if match:
name = 'P' + match.group(1) + match.group(2)
for pin in self.pins:
if pin.name == name:
break
else:
preg = 'PIN_BIT_BAND(%s, %2s)' % ('GPIO' + match.group(1), match.group(2))
pin = Pin(name, 'GPIO'+match.group(1), 'PIN'+match.group(2), preg, 'GPIO%s_IRQn' %match.group(1))
self.pins.append(pin)
def print(self):
for pin in self.pins:
pin.print()
print('')
print('STATIC const mp_rom_map_elem_t pins_locals_dict_table[] = {')
for pin in self.pins:
print(' {{ MP_ROM_QSTR(MP_QSTR_{:5s}), MP_ROM_PTR(&pin_{:5s}) }},'.format(pin.name, pin.name))
print('};')
print('')
print('MP_DEFINE_CONST_DICT(pins_locals_dict, pins_locals_dict_table);')
def print_header(self, hdr_filename):
with open(hdr_filename, 'wt') as hdr_file:
for pin in self.pins:
pin.print_header(hdr_file)
def print_qstr(self, qstr_filename):
with open(qstr_filename, 'wt') as qstr_file:
for pin in self.pins:
print('Q({})'.format(pin.name), file=qstr_file)
def main():
parser = argparse.ArgumentParser(
prog="make-pins.py",
usage="%(prog)s [options] [command]",
description="Generate board specific pin file"
)
parser.add_argument(
"-a", "--af",
dest="af_filename",
help="Specifies the alternate function file for the chip",
default="../chip/SWM3200_port.h"
)
parser.add_argument(
"-p", "--prefix",
dest="prefix_filename",
help="Specifies beginning portion of generated pins file",
default="SWM320_prefix.c"
)
parser.add_argument(
"-q", "--qstr",
dest="qstr_filename",
help="Specifies name of generated qstr header file",
default="../build-SWM320Lite/pins_qstr.h"
)
parser.add_argument(
"-r", "--hdr",
dest="hdr_filename",
help="Specifies name of generated pin header file",
default="../build-SWM320Lite/pins.h"
)
args = parser.parse_args(sys.argv[1:])
pins = Pins()
print('// This file was automatically generated by make-pins.py')
print('//')
if args.af_filename:
print('// --af {:s}'.format(args.af_filename))
pins.parse_af_file(args.af_filename)
if args.prefix_filename:
print('// --prefix {:s}'.format(args.prefix_filename))
print('')
with open(args.prefix_filename, 'r') as prefix_file:
print(prefix_file.read())
pins.print()
pins.print_qstr(args.qstr_filename)
pins.print_header(args.hdr_filename)
if __name__ == "__main__":
main()
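# Illustrative note (editorial addition, not part of the original generator): for a
# hypothetical header line such as "#define PORTA_PIN3_GPIO ...", parse_af_file()
# records a pin named "PA3" bound to GPIOA/PIN3, and Pin.print() then emits roughly
#   pin_obj_t pin_PA3  = PIN(PA3 , GPIOA, PIN3 , PIN_BIT_BAND(GPIOA,  3), GPIOA_IRQn);
# while pins.h receives "extern pin_obj_t pin_PA3 ;" and pins_qstr.h receives "Q(PA3)".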
| 31.438462 | 153 | 0.56276 | ["MIT"] | Synwit-Co-Ltd/micropython | ports/swm320/boards/make-pins.py | 4,087 | Python |
from threading import Thread
import time
def timer(name, delay, repeat):
print "Timer: " + name + " Started"
while repeat > 0:
time.sleep(delay)
print name + ": " + str(time.ctime(time.time()))
repeat -= 1
print "Timer: " + name + " Completed"
def Main():
t1 = Thread(target=timer, args=("Timer1", 1, 5))
t2 = Thread(target=timer, args=("Timer2", 2, 5))
t1.start()
t2.start()
print "main is done"
if __name__ == '__main__':
Main()
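# Editorial note (not part of the original script): Thread.start() returns
# immediately, so "main is done" is printed before either timer finishes. To block
# until both timers complete, one would add
#   t1.join()
#   t2.join()
# at the end of Main().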
| 20.708333 | 56 | 0.571429 | ["MIT"] | joshdabosh/autopack | test_files/multi-threading_test.py | 497 | Python |
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
resource_types = (
{ ANNAL.CURIE.Metadata:
[ ("jsonld", "application/ld+json")
, ("json", "application/json")
]
, ANNAL.CURIE.Text:
[ ("txt", "text/plain")
]
, ANNAL.CURIE.Richtext:
[ ("md", "text/markdown")
, ("txt", "text/plain")
]
, ANNAL.CURIE.Image:
[ ("image", "image/*") # Default extension
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
, ANNAL.CURIE.Audio:
[ ("audio", "audio/*") # Default extension
, ("mp3", "audio/mpeg")
, ("mp4", "audio/mp4")
, ("wav", "audio/wav")
, ("ogg", "audio/ogg")
#@@ needs fleshing out?
]
, ANNAL.CURIE.Resource:
[ ("md", "text/markdown")
, ("txt", "text/plain")
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
})
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
"""
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
"""
return resource_types.get(typeuri, default_types)[0][0]
def content_type(typeuri):
"""
Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
"""
return resource_types.get(typeuri, default_types)[0][1]
def file_extension_for_content_type(typeuri, content_type):
"""
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if ct == content_type:
return fe
return None
def content_type_for_file_extension(typeuri, file_extension):
"""
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if fe == file_extension:
return ct
return None
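# Editorial note (not part of the original module): because extensions and MIME
# types are stored as pairs, the two lookups above invert one another for known
# values; e.g. file_extension_for_content_type(ANNAL.CURIE.Image, "image/png")
# returns "png", and content_type_for_file_extension(ANNAL.CURIE.Image, "png")
# maps it back to "image/png".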
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
| 30.138889 | 92 | 0.617742 | ["MIT"] | gklyne/annalist | src/annalist_root/annalist/resourcetypes.py | 4,340 | Python |
import logging
import os
from collections import defaultdict
from typing import Dict
from typing import List
from typing import Union
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachecontrol.controller import logger as cache_control_logger
from cachy import CacheManager
from html5lib.html5parser import parse
from poetry.core.packages import Dependency
from poetry.core.packages import Package
from poetry.core.packages import dependency_from_pep_508
from poetry.core.packages.utils.link import Link
from poetry.core.semver import VersionConstraint
from poetry.core.semver import VersionRange
from poetry.core.semver import parse_constraint
from poetry.core.semver.exceptions import ParseVersionError
from poetry.core.version.markers import parse_marker
from poetry.locations import REPOSITORY_CACHE_DIR
from poetry.utils._compat import Path
from poetry.utils._compat import to_str
from poetry.utils.helpers import download_file
from poetry.utils.helpers import temporary_directory
from poetry.utils.patterns import wheel_file_re
from ..inspection.info import PackageInfo
from .exceptions import PackageNotFound
from .remote_repository import RemoteRepository
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
cache_control_logger.setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
class PyPiRepository(RemoteRepository):
CACHE_VERSION = parse_constraint("1.0.0")
def __init__(self, url="https://pypi.org/", disable_cache=False, fallback=True):
super(PyPiRepository, self).__init__(url.rstrip("/") + "/simple/")
self._base_url = url
self._disable_cache = disable_cache
self._fallback = fallback
release_cache_dir = REPOSITORY_CACHE_DIR / "pypi"
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(release_cache_dir)},
"packages": {"driver": "dict"},
},
}
)
self._cache_control_cache = FileCache(str(release_cache_dir / "_http"))
self._name = "PyPI"
@property
def session(self):
return CacheControl(requests.session(), cache=self._cache_control_cache)
def find_packages(self, dependency): # type: (Dependency) -> List[Package]
"""
Find packages on the remote server.
"""
constraint = dependency.constraint
if constraint is None:
constraint = "*"
if not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
allow_prereleases = dependency.allows_prereleases()
if isinstance(constraint, VersionRange):
if (
constraint.max is not None
and constraint.max.is_prerelease()
or constraint.min is not None
and constraint.min.is_prerelease()
):
allow_prereleases = True
try:
info = self.get_package_info(dependency.name)
except PackageNotFound:
self._log(
"No packages found for {} {}".format(dependency.name, str(constraint)),
level="debug",
)
return []
packages = []
ignored_pre_release_packages = []
for version, release in info["releases"].items():
if not release:
# Bad release
self._log(
"No release information found for {}-{}, skipping".format(
dependency.name, version
),
level="debug",
)
continue
try:
package = Package(info["info"]["name"], version)
except ParseVersionError:
self._log(
'Unable to parse version "{}" for the {} package, skipping'.format(
version, dependency.name
),
level="debug",
)
continue
if package.is_prerelease() and not allow_prereleases:
if constraint.is_any():
# we need this when all versions of the package are pre-releases
ignored_pre_release_packages.append(package)
continue
if not constraint or (constraint and constraint.allows(package.version)):
packages.append(package)
self._log(
"{} packages found for {} {}".format(
len(packages), dependency.name, str(constraint)
),
level="debug",
)
return packages or ignored_pre_release_packages
def package(
self,
name, # type: str
version, # type: str
extras=None, # type: (Union[list, None])
): # type: (...) -> Package
return self.get_release_info(name, version).to_package(name=name, extras=extras)
def search(self, query):
results = []
search = {"q": query}
response = requests.session().get(self._base_url + "search", params=search)
content = parse(response.content, namespaceHTMLElements=False)
for result in content.findall(".//*[@class='package-snippet']"):
name = result.find("h3/*[@class='package-snippet__name']").text
version = result.find("h3/*[@class='package-snippet__version']").text
if not name or not version:
continue
description = result.find("p[@class='package-snippet__description']").text
if not description:
description = ""
try:
result = Package(name, version, description)
result.description = to_str(description.strip())
results.append(result)
except ParseVersionError:
self._log(
'Unable to parse version "{}" for the {} package, skipping'.format(
version, name
),
level="debug",
)
return results
def get_package_info(self, name): # type: (str) -> dict
"""
Return the package information given its name.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return self._get_package_info(name)
return self._cache.store("packages").remember_forever(
name, lambda: self._get_package_info(name)
)
def _get_package_info(self, name): # type: (str) -> dict
data = self._get("pypi/{}/json".format(name))
if data is None:
raise PackageNotFound("Package [{}] not found.".format(name))
return data
def get_release_info(self, name, version): # type: (str, str) -> PackageInfo
"""
Return the release information given a package name and a version.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
if self._disable_cache:
return PackageInfo.load(self._get_release_info(name, version))
cached = self._cache.remember_forever(
"{}:{}".format(name, version), lambda: self._get_release_info(name, version)
)
cache_version = cached.get("_cache_version", "0.0.0")
if parse_constraint(cache_version) != self.CACHE_VERSION:
# The cache must be updated
self._log(
"The cache for {} {} is outdated. Refreshing.".format(name, version),
level="debug",
)
cached = self._get_release_info(name, version)
self._cache.forever("{}:{}".format(name, version), cached)
return PackageInfo.load(cached)
def find_links_for_package(self, package):
json_data = self._get("pypi/{}/{}/json".format(package.name, package.version))
if json_data is None:
return []
links = []
for url in json_data["urls"]:
h = "sha256={}".format(url["digests"]["sha256"])
links.append(Link(url["url"] + "#" + h))
return links
def _get_release_info(self, name, version): # type: (str, str) -> dict
self._log("Getting info for {} ({}) from PyPI".format(name, version), "debug")
json_data = self._get("pypi/{}/{}/json".format(name, version))
if json_data is None:
raise PackageNotFound("Package [{}] not found.".format(name))
info = json_data["info"]
data = PackageInfo(
name=info["name"],
version=info["version"],
summary=info["summary"],
platform=info["platform"],
requires_dist=info["requires_dist"],
requires_python=info["requires_python"],
files=info.get("files", []),
cache_version=str(self.CACHE_VERSION),
)
try:
version_info = json_data["releases"][version]
except KeyError:
version_info = []
for file_info in version_info:
data.files.append(
{
"file": file_info["filename"],
"hash": "sha256:" + file_info["digests"]["sha256"],
}
)
if self._fallback and data.requires_dist is None:
self._log("No dependencies found, downloading archives", level="debug")
# No dependencies set (along with other information)
# This might be due to actually no dependencies
# or badly set metadata when uploading
# So, we need to make sure there is actually no
# dependencies by introspecting packages
urls = defaultdict(list)
for url in json_data["urls"]:
# Only get sdist and wheels if they exist
dist_type = url["packagetype"]
if dist_type not in ["sdist", "bdist_wheel"]:
continue
urls[dist_type].append(url["url"])
if not urls:
return data.asdict()
info = self._get_info_from_urls(urls)
data.requires_dist = info.requires_dist
if not data.requires_python:
data.requires_python = info.requires_python
return data.asdict()
def _get(self, endpoint): # type: (str) -> Union[dict, None]
try:
json_response = self.session.get(self._base_url + endpoint)
except requests.exceptions.TooManyRedirects:
# Cache control redirect loop.
# We try to remove the cache and try again
self._cache_control_cache.delete(self._base_url + endpoint)
json_response = self.session.get(self._base_url + endpoint)
if json_response.status_code == 404:
return None
json_data = json_response.json()
return json_data
def _get_info_from_urls(self, urls): # type: (Dict[str, List[str]]) -> PackageInfo
# Checking wheels first as they are more likely to hold
# the necessary information
if "bdist_wheel" in urls:
# Check for a universal wheel
wheels = urls["bdist_wheel"]
universal_wheel = None
universal_python2_wheel = None
universal_python3_wheel = None
platform_specific_wheels = []
for wheel in wheels:
link = Link(wheel)
m = wheel_file_re.match(link.filename)
if not m:
continue
pyver = m.group("pyver")
abi = m.group("abi")
plat = m.group("plat")
if abi == "none" and plat == "any":
# Universal wheel
if pyver == "py2.py3":
# Any Python
universal_wheel = wheel
elif pyver == "py2":
universal_python2_wheel = wheel
else:
universal_python3_wheel = wheel
else:
platform_specific_wheels.append(wheel)
if universal_wheel is not None:
return self._get_info_from_wheel(universal_wheel)
info = None
if universal_python2_wheel and universal_python3_wheel:
info = self._get_info_from_wheel(universal_python2_wheel)
py3_info = self._get_info_from_wheel(universal_python3_wheel)
if py3_info.requires_dist:
if not info.requires_dist:
info.requires_dist = py3_info.requires_dist
return info
py2_requires_dist = set(
dependency_from_pep_508(r).to_pep_508()
for r in info.requires_dist
)
py3_requires_dist = set(
dependency_from_pep_508(r).to_pep_508()
for r in py3_info.requires_dist
)
base_requires_dist = py2_requires_dist & py3_requires_dist
py2_only_requires_dist = py2_requires_dist - py3_requires_dist
py3_only_requires_dist = py3_requires_dist - py2_requires_dist
# Normalizing requires_dist
requires_dist = list(base_requires_dist)
for requirement in py2_only_requires_dist:
dep = dependency_from_pep_508(requirement)
dep.marker = dep.marker.intersect(
parse_marker("python_version == '2.7'")
)
requires_dist.append(dep.to_pep_508())
for requirement in py3_only_requires_dist:
dep = dependency_from_pep_508(requirement)
dep.marker = dep.marker.intersect(
parse_marker("python_version >= '3'")
)
requires_dist.append(dep.to_pep_508())
info.requires_dist = sorted(list(set(requires_dist)))
if info:
return info
# Prefer non platform specific wheels
if universal_python3_wheel:
return self._get_info_from_wheel(universal_python3_wheel)
if universal_python2_wheel:
return self._get_info_from_wheel(universal_python2_wheel)
if platform_specific_wheels and "sdist" not in urls:
# Pick the first wheel available and hope for the best
return self._get_info_from_wheel(platform_specific_wheels[0])
return self._get_info_from_sdist(urls["sdist"][0])
def _get_info_from_wheel(self, url): # type: (str) -> PackageInfo
self._log(
"Downloading wheel: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
level="debug",
)
filename = os.path.basename(urlparse.urlparse(url).path.rsplit("/")[-1])
with temporary_directory() as temp_dir:
filepath = Path(temp_dir) / filename
self._download(url, str(filepath))
return PackageInfo.from_wheel(filepath)
def _get_info_from_sdist(self, url): # type: (str) -> PackageInfo
self._log(
"Downloading sdist: {}".format(urlparse.urlparse(url).path.rsplit("/")[-1]),
level="debug",
)
filename = os.path.basename(urlparse.urlparse(url).path)
with temporary_directory() as temp_dir:
filepath = Path(temp_dir) / filename
self._download(url, str(filepath))
return PackageInfo.from_sdist(filepath)
def _download(self, url, dest): # type: (str, str) -> None
return download_file(url, dest, session=self.session)
def _log(self, msg, level="info"):
getattr(logger, level)("<debug>{}:</debug> {}".format(self._name, msg))
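# Editorial usage sketch (not part of Poetry): querying PyPI for the versions of a
# package that satisfy a constraint; the package name and constraint are examples,
# and running this performs real network requests against pypi.org.
if __name__ == "__main__":
    repo = PyPiRepository(disable_cache=True)
    dep = Dependency("requests", ">=2.20")
    for package in repo.find_packages(dep):
        print("{} {}".format(package.name, package.version))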
| 35.909692 | 88 | 0.57333 | ["MIT"] | KevinArellano94/Python-Supabase | venv/Lib/site-packages/poetry/repositories/pypi_repository.py | 16,303 | Python |
#Hkr
import msvcrt
import os
import sys
import random
from ctypes import windll, byref, wintypes
from ctypes.wintypes import SMALL_RECT
STDOUT = -11
WIN_X = 100
WIN_Y = 60
hdl = windll.kernel32.GetStdHandle(STDOUT)
rect = wintypes.SMALL_RECT(0, 0, WIN_X, WIN_Y) # (left, top, right, bottom)
windll.kernel32.SetConsoleWindowInfo(hdl, True, byref(rect))
os.system('')
# 72
# 75 80 77
def Hello():
posx = 3
posy = 3
sys.stdout.write('WELCOME! press any key to continue\n')
while True:
key = ord(msvcrt.getch())
if key == 13:
sys.stdout.write('\x1Bc')
elif key == 224:
key = ord(msvcrt.getch())
if key == 72:
posy -=1
elif key == 75:
posx -=1
elif key == 80:
posy +=1
elif key == 77:
posx +=1
# clamp the cursor inside the console window (posx = column, posy = row)
if posx < 0:
posx = 0
if posy < 0:
posy = 0
if posx > WIN_X-2:
posx = WIN_X-2
if posy > WIN_Y-2:
posy = WIN_Y-2
sys.stdout.write(f"\x1B[48;2;{random.randrange(0,255)};{random.randrange(0,255)};{random.randrange(0,255)}m\x1B[{posy};{posx}H \n")
def Exit():
exit(0)
selected = 0
menu = {
"run":Hello,
"exit":Exit
}
sys.stdout.write('WELCOME! press any key to continue\n\033[?25h')
while True:
key = ord(msvcrt.getch())
if key == 119:
selected = (selected-1) % len(menu)
elif key == 115:
selected = (selected+1) % len(menu)
elif key == 13:
sys.stdout.write('\x1Bc')
menu[list(menu.keys())[selected]]()
sys.stdout.write('\x1Bc')
for elem_num in range(len(menu)):
if elem_num == selected:
sys.stdout.write(f'> {list(menu.keys())[elem_num]}\n')
else:
sys.stdout.write(f' {list(menu.keys())[elem_num]}\n')
sys.stdout.write('\x1Bc')
| 25.293333 | 139 | 0.542963 | ["MIT"] | NewLife1324/PyStorage | New Tests.py | 1,897 | Python |
from django.utils.cache import get_conditional_response
from django.utils.http import http_date, parse_http_date_safe, unquote_etag
class ConditionalGetMiddleware(object):
"""
Handles conditional GET operations. If the response has an ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers.
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.streaming and not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
etag = response.get('ETag')
last_modified = response.get('Last-Modified')
if last_modified:
last_modified = parse_http_date_safe(last_modified)
if etag or last_modified:
return get_conditional_response(
request,
etag=unquote_etag(etag),
last_modified=last_modified,
response=response,
)
return response
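# Editorial usage sketch (not part of Django itself): exercising the middleware with
# a request whose If-None-Match matches the response's ETag, which should collapse
# the response to a 304 via get_conditional_response(). Assumes a minimally
# configured Django environment; the ETag value below is an arbitrary example.
if __name__ == "__main__":
    import django
    from django.conf import settings

    if not settings.configured:
        settings.configure()
        django.setup()

    from django.http import HttpResponse
    from django.test import RequestFactory

    request = RequestFactory().get("/", HTTP_IF_NONE_MATCH='"abc123"')
    response = HttpResponse("payload")
    response["ETag"] = '"abc123"'
    result = ConditionalGetMiddleware().process_response(request, response)
    print(result.status_code)  # expected: 304 (HttpResponseNotModified)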
| 35.65625 | 80 | 0.669588 | ["MIT"] | CharleyFarley/ovvio | venv/lib/python2.7/site-packages/django/middleware/http.py | 1,141 | Python |
import os.path
class Settings:
def __init__(self):
self.entry_point = os.path.expanduser('facts.graft')
self.userfacts = os.path.expanduser('~/.facts/user.yml')
self.userpath = os.path.expanduser('~/.facts/grafts')
settings = Settings()
| 22.416667 | 64 | 0.657993 | ["BSD-3-Clause"] | johnnoone/facts | facts/conf.py | 269 | Python |
from setuptools import setup,find_packages
import os
import shutil
#remove the dist folder first if exists
if os.path.exists("dist"):
shutil.rmtree("dist")
def readme():
with open('README.rst') as f:
return(f.read())
VERSION = '1.0.53'
def write_version_py(filename='SigProfilerTopography/version.py'):
# Copied from numpy setup.py
cnt = """
# THIS FILE IS GENERATED FROM SIGPROFILERTOPOGRAPHY SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
"""
fh = open(filename, 'w')
fh.write(cnt % {'version': VERSION,})
fh.close()
write_version_py()
setup(name="SigProfilerTopography",
version=VERSION,
author="Burcak Otlu",
author_email="[email protected]",
description="SigProfilerTopography provides topography analyses for substitutions, dinucleotides and indels for all given samples.",
url="https://github.com/AlexandrovLab/SigProfilerTopography",
license='UCSD',
packages=find_packages(),
install_requires=[
"SigProfilerMatrixGenerator>=1.1.27",
"SigProfilerSimulator>=1.1.2",
"XlsxWriter>=1.3.7",
"pandas>=1.1.5",
"numpy>=1.20.1",
"matplotlib>=2.2.2",
"scipy>=1.1.0",
"statsmodels>=0.9.0",
"fastrand>=1.2",
"psutil>=5.6.3"],
include_package_data=True,
zip_safe=False)
| 26 | 136 | 0.681538 | [
"BSD-2-Clause"
] | AlexandrovLab/SigProfilerTopography | setup.py | 1,300 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmts.endpoint import endpoint_data
class SubmitFpCompareJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'SubmitFpCompareJob','mts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_FpDBId(self):
return self.get_query_params().get('FpDBId')
def set_FpDBId(self,FpDBId):
self.add_query_param('FpDBId',FpDBId)
def get_MasterMedia(self):
return self.get_query_params().get('MasterMedia')
def set_MasterMedia(self,MasterMedia):
self.add_query_param('MasterMedia',MasterMedia)
def get_UserData(self):
return self.get_query_params().get('UserData')
def set_UserData(self,UserData):
self.add_query_param('UserData',UserData)
def get_QueryMedia(self):
return self.get_query_params().get('QueryMedia')
def set_QueryMedia(self,QueryMedia):
self.add_query_param('QueryMedia',QueryMedia)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PipelineId(self):
return self.get_query_params().get('PipelineId')
def set_PipelineId(self,PipelineId):
self.add_query_param('PipelineId',PipelineId)
def get_MatchedFrameStorage(self):
return self.get_query_params().get('MatchedFrameStorage')
def set_MatchedFrameStorage(self,MatchedFrameStorage):
self.add_query_param('MatchedFrameStorage',MatchedFrameStorage)
| 33.717391 | 77 | 0.770471 | ["Apache-2.0"] | ankitdobhal/aliyun-openapi-python-sdk | aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/SubmitFpCompareJobRequest.py | 3,102 | Python |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowWhitelistResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'whitelist': 'WhitelistResp'
}
attribute_map = {
'whitelist': 'whitelist'
}
def __init__(self, whitelist=None):
"""ShowWhitelistResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._whitelist = None
self.discriminator = None
if whitelist is not None:
self.whitelist = whitelist
@property
def whitelist(self):
"""Gets the whitelist of this ShowWhitelistResponse.
:return: The whitelist of this ShowWhitelistResponse.
:rtype: WhitelistResp
"""
return self._whitelist
@whitelist.setter
def whitelist(self, whitelist):
"""Sets the whitelist of this ShowWhitelistResponse.
:param whitelist: The whitelist of this ShowWhitelistResponse.
:type: WhitelistResp
"""
self._whitelist = whitelist
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowWhitelistResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
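# Editorial usage sketch (not part of the SDK): to_dict() recursively serializes
# nested model objects into plain dicts; with no whitelist attached the result is
# simply {'whitelist': None}.
if __name__ == "__main__":
    example_response = ShowWhitelistResponse()
    print(example_response.to_dict())  # {'whitelist': None}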
| 26.385321 | 74 | 0.555285 | ["Apache-2.0"] | Adek06/huaweicloud-sdk-python-v3 | huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/show_whitelist_response.py | 2,876 | Python |
"""optik.option_parser
Provides the OptionParser and Values classes.
"""
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Optik/option_parser.py 0.96.90.D001 2005/02/15 20:11:37 knight"
# Original Optik revision this is based on:
__Optik_revision__ = "option_parser.py,v 1.38.2.1 2002/07/23 01:51:14 gward Exp"
# Copyright (c) 2001 Gregory P. Ward. All rights reserved.
# See the README.txt distributed with Optik for licensing terms.
# created 2001/10/17, GPW (from optik.py)
import sys, os
import string
import types
from SCons.Optik.option import Option, NO_DEFAULT
from SCons.Optik.errors import OptionConflictError, OptionValueError, BadOptionError
def get_prog_name ():
return os.path.basename(sys.argv[0])
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__ (self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def _update_careful (self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if dict.has_key(attr):
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose (self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update (self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %s" % (repr(mode),)
def read_module (self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file (self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value (self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionParser:
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (os.path.basename(sys.argv[0])).
option_list : [Option]
the list of all options accepted on the command-line of
this program
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times.
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary.
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination.
allow_interspersed_args : boolean = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__ (self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error"):
self.set_usage(usage)
self.option_class = option_class
self.version = version
self.set_conflict_handler(conflict_handler)
self.allow_interspersed_args = 1
# Create the various lists and dicts that constitute the
# "option list". See class docstring for details about
# each attribute.
self._create_option_list()
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and the STD_VERSION_OPTION global (if 'version'
# supplied).
self._populate_option_list(option_list)
self._init_parsing_state()
# -- Private methods -----------------------------------------------
# (used by the constructor)
def _create_option_list (self):
self.option_list = []
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _populate_option_list (self, option_list):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
def _init_parsing_state (self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage (self, usage):
if usage is None:
self.usage = "usage: %prog [options]"
elif usage is SUPPRESS_USAGE:
self.usage = None
else:
self.usage = usage
def enable_interspersed_args (self):
self.allow_interspersed_args = 1
def disable_interspersed_args (self):
self.allow_interspersed_args = 0
def set_conflict_handler (self, handler):
if handler not in ("ignore", "error", "resolve"):
raise ValueError, "invalid conflict_resolution value %s" % (repr(handler),)
self.conflict_handler = handler
def set_default (self, dest, value):
self.defaults[dest] = value
def set_defaults (self, **kwargs):
self.defaults.update(kwargs)
def get_default_values(self):
return Values(self.defaults)
# -- Option-adding methods -----------------------------------------
def _check_conflict (self, option):
conflict_opts = []
for opt in option._short_opts:
if self._short_opt.has_key(opt):
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if self._long_opt.has_key(opt):
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "ignore": # behaviour for Optik 1.0, 1.1
pass
elif handler == "error": # new in 1.2
raise OptionConflictError(
"conflicting option string(s): %s"
% string.join( map( lambda x: x[0], conflict_opts),", "),
option)
elif handler == "resolve": # new in 1.2
for (opt, c_option) in conflict_opts:
if len(opt)>2 and opt[:2]=="--":
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
self.option_list.remove(c_option)
def add_option (self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) is types.StringType:
option = apply(self.option_class,args, kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %s" % (repr(option),)
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif not self.defaults.has_key(option.dest):
self.defaults[option.dest] = None
def add_options (self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option (self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option (self, opt_str):
return (self._short_opt.has_key(opt_str) or
self._long_opt.has_key(opt_str))
def remove_option (self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %s" % (repr(opt_str),))
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
self.option_list.remove(option)
# -- Option-parsing methods ----------------------------------------
def _get_args (self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args (self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(err.msg)
args = largs + rargs
return self.check_values(values, args)
def check_values (self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args (self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt (self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt (self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = string.split(arg,"=", 1)
rargs.insert(0, next_arg)
had_explicit_value = 1
else:
opt = arg
had_explicit_value = 0
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires a value" % opt)
else:
self.error("%s option requires %d values"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error("%s option does not take a value" % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts (self, rargs, values):
arg = rargs.pop(0)
stop = 0
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i = i+1 # we have consumed a character
if not option:
self.error("no such option: %s" % opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = 1
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error("%s option requires a value" % opt)
else:
self.error("%s option requires %s values"
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Output/error methods ------------------------------------------
def error (self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
sys.stderr.write("\nSCons error: %s\n" % msg)
sys.exit(2)
def print_usage (self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if file is None:
file = sys.stdout
if self.usage:
usage = string.replace(self.usage,"%prog", get_prog_name())
file.write(usage + "\n")
def print_version (self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if file is None:
file = sys.stdout
if self.version:
version = string.replace(self.version,"%prog", get_prog_name())
file.write(version+"\n")
def print_help (self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
# SCons: don't import wrap_text from distutils, use the
# copy we've included below, so we can avoid being dependent
# on having the right version of distutils installed.
#from distutils.fancy_getopt import wrap_text
if file is None:
file = sys.stdout
self.print_usage(file)
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
file.write("Options:\n")
width = 78 # assume 80 cols for now
option_help = [] # list of (string, string) tuples
lengths = []
for option in self.option_list:
takes_value = option.takes_value()
if takes_value:
metavar = option.metavar or string.upper(option.dest)
opts = [] # list of "-a" or "--foo=FILE" strings
if option.help is SUPPRESS_HELP:
continue
if takes_value:
for sopt in option._short_opts:
opts.append(sopt + ' ' + metavar)
for lopt in option._long_opts:
opts.append(lopt + "=" + metavar)
else:
for opt in option._short_opts + option._long_opts:
opts.append(opt)
opts = string.join(opts,", ")
option_help.append((opts, option.help))
lengths.append(len(opts))
max_opts = min(max(lengths), 26)
for (opts, help) in option_help:
# how much to indent lines 2 .. N of help text
indent_rest = 2 + max_opts + 2
help_width = width - indent_rest
if len(opts) > max_opts:
opts = " " + opts + "\n"
indent_first = indent_rest
else: # start help on same line as opts
opts = " %-*s " % (max_opts, opts)
indent_first = 0
file.write(opts)
if help:
help_lines = wrap_text(help, help_width)
file.write( "%*s%s\n" % (indent_first, "", help_lines[0]))
for line in help_lines[1:]:
file.write(" %*s%s\n" % (indent_rest, "", line))
elif opts[-1] != "\n":
file.write("\n")
# class OptionParser
def _match_abbrev (s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if wordmap.has_key(s):
return s
else:
# Isolate all words with s as a prefix.
possibilities = []
ls = len(s)
for word in wordmap.keys():
if len(word)>=ls and word[:ls]==s:
possibilities.append(word)
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError("no such option: %s" % s)
else:
# More than one possible completion: ambiguous prefix.
raise BadOptionError("ambiguous option: %s (%s?)"
% (s, string.join(possibilities,", ")))
# SCons: Include a snarfed copy of wrap_text(), so we're not dependent
# on the right version of distutils being installed.
import re
WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
def wrap_text (text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = string.expandtabs(text)
text = string.translate(text, WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = filter(None, chunks) # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big too fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(string.join(cur_line, ''))
# while chunks
return lines
# wrap_text ()
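# Editorial usage sketch (not part of the SCons/Optik sources): the basic
# add_option()/parse_args() round trip described in the OptionParser docstring
# above. The option strings and the argument list are illustrative only.
if __name__ == "__main__":
    example_parser = OptionParser(usage="%prog [options] target")
    example_parser.add_option("-f", "--file", dest="filename",
                              help="read data from FILENAME")
    example_parser.add_option("-q", "--quiet", action="store_false",
                              dest="verbose", default=1,
                              help="don't print status messages")
    (values, args) = example_parser.parse_args(["-f", "build.conf", "target1"])
    print values.filename, values.verbose, args # e.g. build.conf 1 ['target1']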
| 36.585499 | 132 | 0.561584 | ["MIT"] | luaman/twilight | scons-local-0.96.90/SCons/Optik/option_parser.py | 26,744 | Python |
from django.urls import path
# urlpatterns = [
# path("/register", )
# ]
| 15.4 | 28 | 0.61039 | ["MIT"] | MohanSai1997/django-forum-engine | django_forum_engine/account/urls.py | 77 | Python |
from . import exact_diagonalisation, models, tensornetworks
| 30 | 59 | 0.85 | ["MIT"] | Jakob-Unfried/msc-legacy | grad_tn/__init__.py | 60 | Python |
from django.contrib import admin
from .models import UserProfile,ProfileFeedItem
# Register your models here.
admin.site.register(UserProfile)
admin.site.register(ProfileFeedItem)
| 22.75 | 47 | 0.835165 | ["MIT"] | abirhasansuvro/Profiles-Rest-Api | profiles_api/admin.py | 182 | Python |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from collections import namedtuple
from .utils import at_bits
GEN_GLOBALPOOL = "CNN_GlobalPool"
# /** \brief CNN_GlobalPool
# * Generator for Global Pooling (Max or Average)
# *
# \param Name: Name of the generated user kernel
# \param Ctrl: Override generator default options (TileOrientation, Parallel Features), Def=(TILE_HOR, 1)
# \param In_DataSize: 1: byte, 2: half word, 4: word
# \param Out_DataSize: 1: byte, 2: half word, 4: word
# \param In_Q: In fixed point format
# \param Out_Q: Out fixed point format
# \param In_InL3: 0: In is in L2, 1: In is in L3 memory
# \param Out_InL3: 0: Out is in L2, 1: Out is in L3 memory
# \param InFeat: Number of input feature's maps
# \param OutFeat: Number of output feature's maps (InFeat has to be equal to OutFeat for these generators)
# \param Width: Number of columns of a given feature map
# \param Height: Number of lines of a given feature map
# \param PoolOper: KOP_GLOBAL_MAXPOOL or KOP_GLOBAL_AVGPOOL
GlobalPoolATParam = namedtuple('GlobalPoolATParam', [
"GlobalPoolOper"
])
def gen_globalpool_at_params(params):
return GlobalPoolATParam(
GlobalPoolOper="KOP_GLOBAL_AVGPOOL" if params.pool_type == "average" else "KOP_GLOBAL_MAXPOOL"
)
def gen_at_globalpool(code_block, name, in_q, out_q,
in_dim, out_dim, at_globalpool, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
gen_ctrl = "0"
else:
raise NotImplementedError("genctrl is not yet implemented")
if at_ver < 3:
code_block.write('{}("{}", {}, {}, {}, 1, 1, {}, {}, {}, {}, {});',
GEN_GLOBALPOOL, name, gen_ctrl,
at_bits(in_q), at_bits(out_q), in_dim.shape[0], out_dim.shape[0],
in_dim.shape[1], in_dim.shape[2], at_globalpool.GlobalPoolOper)
else:
code_block.write('{}("{}", {}, {}, {}, {}, {}, 1, 1, {}, {}, {}, {}, {});',
GEN_GLOBALPOOL, name, gen_ctrl,
at_bits(in_q), at_bits(
out_q), in_q.q, out_q.q, in_dim.shape[0], out_dim.shape[0],
in_dim.shape[1], in_dim.shape[2], at_globalpool.GlobalPoolOper)
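# Editorial usage sketch (not part of the generator): gen_globalpool_at_params()
# only reads params.pool_type, so a minimal stand-in object (hypothetical, defined
# here only for illustration) is enough to show the mapping onto the AutoTiler
# kernel-operation names.
if __name__ == "__main__":
    _FakePoolParams = namedtuple("_FakePoolParams", ["pool_type"])
    print(gen_globalpool_at_params(_FakePoolParams("average")))  # KOP_GLOBAL_AVGPOOL
    print(gen_globalpool_at_params(_FakePoolParams("max")))      # KOP_GLOBAL_MAXPOOL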
| 43.507042 | 121 | 0.622855 | ["Apache-2.0"] | VishalSharma0309/gap_sdk | tools/nntool/generation/at_generators/cnn_global_pool.py | 3,089 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# My site is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""Invenio digital library framework."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('my_site', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='my-site',
version=version,
description=__doc__,
long_description=readme,
keywords='my-site Invenio',
license='MIT',
author='CERN',
author_email='[email protected]',
url='https://github.com/my-site/my-site',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'console_scripts': [
'my-site = invenio_app.cli:cli',
],
'invenio_base.apps': [
'my_site_records = my_site.records:Mysite',
],
'invenio_base.blueprints': [
'my_site = my_site.theme.views:blueprint',
'my_site_records = my_site.records.views:blueprint',
],
'invenio_assets.webpack': [
'my_site_theme = my_site.theme.webpack:theme',
],
'invenio_config.module': [
'my_site = my_site.config',
],
'invenio_i18n.translations': [
'messages = my_site',
],
'invenio_base.api_apps': [
'my_site = my_site.records:Mysite',
'authors = my_site.authors:Authors',
],
'invenio_pidstore.fetchers': [
'authid = my_site.authors.fetchers:author_pid_fetcher',
],
'invenio_pidstore.minters': [
'authid = my_site.authors.minters:author_pid_minter',
],
'invenio_jsonschemas.schemas': [
'my_site = my_site.records.jsonschemas',
'authors = my_site.authors.jsonschemas',
],
'invenio_search.mappings': [
'records = my_site.records.mappings',
'authors = my_site.authors.mappings',
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Development Status :: 3 - Alpha',
],
)
| 30.022472 | 74 | 0.587201 | ["MIT"] | AoNoOokami/training | 08-data-models-from-scratch/solution/my-site/setup.py | 2,672 | Python |
# This file is part of
# the galxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
Rotated, velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
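# A minimal usage sketch (not part of the original fixtures): it only shows how
# `rotate` composes with the rotation-matrix helpers above; the sample vectors
# and tolerance are invented for the illustration and the function is never
# called by the test suite.
def _example_rotate_quarter_turn_z():
    pos = np.array([[1.0, 0.0, 0.0]])
    vel = np.array([[0.0, 1.0, 0.0]])
    a = rot_matrix_zaxis(theta=np.pi / 2)
    pos_rot, vel_rot = rotate(pos, vel, a)
    # With the row-vector convention `pos @ matrix`, a quarter turn about Z
    # maps +x to -y and +y to +x.
    assert np.allclose(pos_rot, [[0.0, -1.0, 0.0]], atol=1e-12)
    assert np.allclose(vel_rot, [[1.0, 0.0, 0.0]], atol=1e-12)
    return pos_rot, vel_rot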
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
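    # Pairwise coordinate differences; each matrix is antisymmetric
    # (d[j, i] = -d[i, j]), so only the upper triangle is computed explicitly.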
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
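    # Gravitational constant; the value is consistent with units of
    # kpc * (km/s)**2 / Msun (assumed convention, matching kpc and km/s inputs).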
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
    Galaxy parameters for tests.
    This returns a factory function that builds a dictionary of random
    parameters for a Galaxy object.
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
| 24.856842 | 79 | 0.502499 | [
"MIT"
] | vcristiani/galaxy-chop | tests/conftest.py | 11,807 | Python |
import numpy as np
import pytest
import pandas._testing as tm
from pandas.core.construction import sanitize_array
@pytest.mark.parametrize(
"values, dtype, expected",
[
([1, 2, 3], None, np.array([1, 2, 3], dtype=np.int64)),
(np.array([1, 2, 3]), None, np.array([1, 2, 3])),
(["1", "2", None], None, np.array(["1", "2", None])),
(["1", "2", None], np.dtype("str"), np.array(["1", "2", None])),
([1, 2, None], np.dtype("str"), np.array(["1", "2", None])),
],
)
def test_construct_1d_ndarray_preserving_na(values, dtype, expected):
result = sanitize_array(values, index=None, dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"])
def test_construct_1d_ndarray_preserving_na_datetimelike(dtype):
arr = np.arange(5, dtype=np.int64).view(dtype)
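    # `.view` reinterprets the int64 buffer as datetime64/timedelta64 values
    # without copying; boxing into an object-dtype array should keep those scalars.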
expected = np.array(list(arr), dtype=object)
assert all(isinstance(x, type(arr[0])) for x in expected)
result = sanitize_array(arr, index=None, dtype=np.dtype(object))
tm.assert_numpy_array_equal(result, expected)
| 36.483871 | 73 | 0.626879 | [
"MIT"
] | alvinajacquelyn/COMP0016_2 | venv/Lib/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py | 1,131 | Python |
import asyncio
from typing import (
Dict,
Iterable,
Optional,
Sequence,
Tuple,
cast,
)
from cancel_token import (
CancelToken,
)
from eth_utils import ValidationError, to_tuple
from eth.exceptions import (
BlockNotFound,
)
from eth2.beacon.helpers import (
compute_start_slot_of_epoch,
)
from eth2.beacon.chains.base import (
BaseBeaconChain,
)
from eth2.beacon.types.attestations import (
Attestation,
)
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
)
from eth2.beacon.typing import (
Epoch,
Slot,
HashTreeRoot,
Version,
SigningRoot,
)
from eth2.beacon.constants import (
ZERO_SIGNING_ROOT,
)
from libp2p import (
initialize_default_swarm,
)
from libp2p.typing import TProtocol
from libp2p.crypto.keys import (
KeyPair,
)
from libp2p.host.basic_host import (
BasicHost,
)
from libp2p.network.network_interface import (
INetwork,
)
from libp2p.network.stream.net_stream_interface import (
INetStream,
)
from libp2p.peer.id import (
ID,
)
from libp2p.peer.peerinfo import (
PeerInfo,
)
from libp2p.peer.peerstore import (
PeerStore,
)
from libp2p.pubsub.pubsub import (
Pubsub,
)
from libp2p.pubsub.gossipsub import (
GossipSub,
)
from libp2p.security.base_transport import BaseSecureTransport
from libp2p.security.insecure.transport import PLAINTEXT_PROTOCOL_ID, InsecureTransport
from libp2p.stream_muxer.abc import IMuxedConn
from libp2p.stream_muxer.mplex.exceptions import MplexStreamEOF, MplexStreamReset
from libp2p.stream_muxer.mplex.mplex import MPLEX_PROTOCOL_ID, Mplex
from multiaddr import (
Multiaddr,
protocols,
)
import ssz
from p2p.service import (
BaseService,
)
from .configs import (
GOSSIPSUB_PROTOCOL_ID,
GoodbyeReasonCode,
GossipsubParams,
PUBSUB_TOPIC_BEACON_BLOCK,
PUBSUB_TOPIC_BEACON_ATTESTATION,
REQ_RESP_BEACON_BLOCKS,
REQ_RESP_GOODBYE,
REQ_RESP_HELLO,
REQ_RESP_RECENT_BEACON_BLOCKS,
ResponseCode,
)
from .exceptions import (
HandshakeFailure,
ReadMessageFailure,
RequestFailure,
WriteMessageFailure,
)
from .messages import (
Goodbye,
HelloRequest,
BeaconBlocksRequest,
BeaconBlocksResponse,
RecentBeaconBlocksRequest,
RecentBeaconBlocksResponse,
)
from .topic_validators import (
get_beacon_attestation_validator,
get_beacon_block_validator,
)
from .utils import (
make_rpc_v1_ssz_protocol_id,
make_tcp_ip_maddr,
read_req,
read_resp,
write_req,
write_resp,
)
from dataclasses import dataclass
import operator
from eth_utils.toolz import first
REQ_RESP_HELLO_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_HELLO)
REQ_RESP_GOODBYE_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_GOODBYE)
REQ_RESP_BEACON_BLOCKS_SSZ = make_rpc_v1_ssz_protocol_id(REQ_RESP_BEACON_BLOCKS)
REQ_RESP_RECENT_BEACON_BLOCKS_SSZ = make_rpc_v1_ssz_protocol_id(
REQ_RESP_RECENT_BEACON_BLOCKS
)
@dataclass
class Peer:
node: "Node"
_id: ID
fork_version: Version # noqa: E701
finalized_root: SigningRoot
finalized_epoch: Epoch
head_root: HashTreeRoot
head_slot: Slot
@classmethod
def from_hello_request(
cls, node: "Node", peer_id: ID, request: HelloRequest
) -> "Peer":
return cls(
node=node,
_id=peer_id,
fork_version=request.fork_version,
finalized_root=request.finalized_root,
finalized_epoch=request.finalized_epoch,
head_root=request.head_root,
head_slot=request.head_slot,
)
async def request_beacon_blocks(
self, start_slot: Slot, count: int, step: int = 1
) -> Tuple[BaseBeaconBlock, ...]:
return await self.node.request_beacon_blocks(
self._id,
head_block_root=self.head_root,
start_slot=start_slot,
count=count,
step=step,
)
async def request_recent_beacon_blocks(
self, block_roots: Sequence[HashTreeRoot]
) -> Tuple[BaseBeaconBlock, ...]:
return await self.node.request_recent_beacon_blocks(self._id, block_roots)
class PeerPool:
peers: Dict[ID, Peer]
def __init__(self) -> None:
self.peers = {}
def add(self, peer: Peer) -> None:
self.peers[peer._id] = peer
def remove(self, peer_id: ID) -> None:
del self.peers[peer_id]
def __contains__(self, peer_id: ID) -> bool:
return peer_id in self.peers.keys()
def __len__(self) -> int:
return len(self.peers)
def get_best(self, field: str) -> Peer:
sorted_peers = sorted(
self.peers.values(), key=operator.attrgetter(field), reverse=True
)
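        # `reverse=True` sorts the largest value of `field` first, so `first`
        # picks the peer that ranks best on that attribute.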
return first(sorted_peers)
def get_best_head_slot_peer(self) -> Peer:
return self.get_best("head_slot")
class Node(BaseService):
_is_started: bool = False
key_pair: KeyPair
listen_ip: str
listen_port: int
host: BasicHost
pubsub: Pubsub
bootstrap_nodes: Optional[Tuple[Multiaddr, ...]]
preferred_nodes: Optional[Tuple[Multiaddr, ...]]
chain: BaseBeaconChain
handshaked_peers: PeerPool = None
def __init__(
self,
key_pair: KeyPair,
listen_ip: str,
listen_port: int,
chain: BaseBeaconChain,
security_protocol_ops: Dict[TProtocol, BaseSecureTransport] = None,
muxer_protocol_ops: Dict[TProtocol, IMuxedConn] = None,
gossipsub_params: Optional[GossipsubParams] = None,
cancel_token: CancelToken = None,
bootstrap_nodes: Tuple[Multiaddr, ...] = None,
preferred_nodes: Tuple[Multiaddr, ...] = None) -> None:
super().__init__(cancel_token)
self.listen_ip = listen_ip
self.listen_port = listen_port
self.key_pair = key_pair
self.bootstrap_nodes = bootstrap_nodes
self.preferred_nodes = preferred_nodes
# TODO: Add key and peer_id to the peerstore
if security_protocol_ops is None:
security_protocol_ops = {
PLAINTEXT_PROTOCOL_ID: InsecureTransport(key_pair)
}
if muxer_protocol_ops is None:
muxer_protocol_ops = {MPLEX_PROTOCOL_ID: Mplex}
network: INetwork = initialize_default_swarm(
key_pair=key_pair,
transport_opt=[self.listen_maddr],
muxer_opt=muxer_protocol_ops,
sec_opt=security_protocol_ops,
peerstore_opt=None, # let the function initialize it
disc_opt=None, # no routing required here
)
self.host = BasicHost(network=network, router=None)
if gossipsub_params is None:
gossipsub_params = GossipsubParams()
gossipsub_router = GossipSub(
protocols=[GOSSIPSUB_PROTOCOL_ID],
degree=gossipsub_params.DEGREE,
degree_low=gossipsub_params.DEGREE_LOW,
degree_high=gossipsub_params.DEGREE_HIGH,
time_to_live=gossipsub_params.FANOUT_TTL,
gossip_window=gossipsub_params.GOSSIP_WINDOW,
gossip_history=gossipsub_params.GOSSIP_HISTORY,
heartbeat_interval=gossipsub_params.HEARTBEAT_INTERVAL,
)
self.pubsub = Pubsub(
host=self.host,
router=gossipsub_router,
my_id=self.peer_id,
)
self.chain = chain
self.handshaked_peers = PeerPool()
self.run_task(self.start())
@property
def is_started(self) -> bool:
return self._is_started
async def _run(self) -> None:
self.logger.info("libp2p node %s is up", self.listen_maddr)
await self.cancellation()
async def start(self) -> None:
# host
self._register_rpc_handlers()
# TODO: Register notifees
await self.host.get_network().listen(self.listen_maddr)
await self.connect_preferred_nodes()
# TODO: Connect bootstrap nodes?
# pubsub
await self.pubsub.subscribe(PUBSUB_TOPIC_BEACON_BLOCK)
await self.pubsub.subscribe(PUBSUB_TOPIC_BEACON_ATTESTATION)
self._setup_topic_validators()
self._is_started = True
def _setup_topic_validators(self) -> None:
self.pubsub.set_topic_validator(
PUBSUB_TOPIC_BEACON_BLOCK,
get_beacon_block_validator(self.chain),
False,
)
self.pubsub.set_topic_validator(
PUBSUB_TOPIC_BEACON_ATTESTATION,
get_beacon_attestation_validator(self.chain),
False,
)
async def dial_peer(self, ip: str, port: int, peer_id: ID) -> None:
"""
Dial the peer ``peer_id`` through the IPv4 protocol
"""
await self.host.connect(
PeerInfo(
peer_id=peer_id,
addrs=[make_tcp_ip_maddr(ip, port)],
)
)
async def dial_peer_maddr(self, maddr: Multiaddr) -> None:
"""
Parse `maddr`, get the ip:port and PeerID, and call `dial_peer` with the parameters.
"""
ip = maddr.value_for_protocol(protocols.P_IP4)
port = maddr.value_for_protocol(protocols.P_TCP)
peer_id = ID.from_base58(maddr.value_for_protocol(protocols.P_P2P))
await self.dial_peer(ip=ip, port=port, peer_id=peer_id)
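    # Illustrative multiaddr shape only (the address below is a made-up example,
    # not taken from the original source):
    #     /ip4/127.0.0.1/tcp/13000/p2p/<base58-encoded peer id>
    # which parses into ip="127.0.0.1", port="13000" and the trailing peer ID.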
async def connect_preferred_nodes(self) -> None:
if self.preferred_nodes is None or len(self.preferred_nodes) == 0:
return
await asyncio.wait([
self.dial_peer_maddr(node_maddr)
for node_maddr in self.preferred_nodes
])
async def disconnect_peer(self, peer_id: ID) -> None:
if peer_id in self.handshaked_peers:
self.logger.debug("Disconnect from %s", peer_id)
self.handshaked_peers.remove(peer_id)
await self.host.disconnect(peer_id)
else:
self.logger.debug("Already disconnected from %s", peer_id)
async def broadcast_beacon_block(self, block: BaseBeaconBlock) -> None:
await self._broadcast_data(PUBSUB_TOPIC_BEACON_BLOCK, ssz.encode(block))
async def broadcast_attestation(self, attestation: Attestation) -> None:
await self._broadcast_data(PUBSUB_TOPIC_BEACON_ATTESTATION, ssz.encode(attestation))
async def _broadcast_data(self, topic: str, data: bytes) -> None:
await self.pubsub.publish(topic, data)
@property
def peer_id(self) -> ID:
return self.host.get_id()
@property
def listen_maddr(self) -> Multiaddr:
return make_tcp_ip_maddr(self.listen_ip, self.listen_port)
@property
def listen_maddr_with_peer_id(self) -> Multiaddr:
return self.listen_maddr.encapsulate(Multiaddr(f"/p2p/{self.peer_id.to_base58()}"))
@property
def peer_store(self) -> PeerStore:
return self.host.get_network().peerstore
async def close(self) -> None:
# FIXME: Add `tear_down` to `Swarm` in the upstream
network = self.host.get_network()
for listener in network.listeners.values():
listener.server.close()
await listener.server.wait_closed()
# TODO: Add `close` in `Pubsub`
def _register_rpc_handlers(self) -> None:
self.host.set_stream_handler(REQ_RESP_HELLO_SSZ, self._handle_hello)
self.host.set_stream_handler(REQ_RESP_GOODBYE_SSZ, self._handle_goodbye)
self.host.set_stream_handler(REQ_RESP_BEACON_BLOCKS_SSZ, self._handle_beacon_blocks)
self.host.set_stream_handler(
REQ_RESP_RECENT_BEACON_BLOCKS_SSZ,
self._handle_recent_beacon_blocks,
)
#
# RPC Handlers
#
# TODO: Add a wrapper or decorator to handle the exceptions in handlers,
# to close the streams safely. Probably starting from: if the function
# returns successfully, then close the stream. Otherwise, reset the stream.
# TODO: Handle the reputation of peers. Deduct their scores and even disconnect when they
    # misbehave.
# TODO: Register notifee to the `Network` to
# - Record peers' joining time.
# - Disconnect peers when they fail to join in a certain amount of time.
async def _validate_hello_req(self, hello_other_side: HelloRequest) -> None:
state_machine = self.chain.get_state_machine()
state = self.chain.get_head_state()
config = state_machine.config
if hello_other_side.fork_version != state.fork.current_version:
raise ValidationError(
"`fork_version` mismatches: "
f"hello_other_side.fork_version={hello_other_side.fork_version}, "
f"state.fork.current_version={state.fork.current_version}"
)
# Can not validate the checkpoint with `finalized_epoch` higher than ours
if hello_other_side.finalized_epoch > state.finalized_checkpoint.epoch:
return
# Get the finalized root at `hello_other_side.finalized_epoch`
# Edge case where nothing is finalized yet
if (
hello_other_side.finalized_epoch == 0 and
hello_other_side.finalized_root == ZERO_SIGNING_ROOT
):
return
finalized_epoch_start_slot = compute_start_slot_of_epoch(
hello_other_side.finalized_epoch,
config.SLOTS_PER_EPOCH,
)
finalized_root = self.chain.get_canonical_block_root(
finalized_epoch_start_slot)
if hello_other_side.finalized_root != finalized_root:
raise ValidationError(
"`finalized_root` mismatches: "
f"hello_other_side.finalized_root={hello_other_side.finalized_root}, "
f"hello_other_side.finalized_epoch={hello_other_side.finalized_epoch}, "
f"our `finalized_root` at the same `finalized_epoch`={finalized_root}"
)
def _make_hello_packet(self) -> HelloRequest:
state = self.chain.get_head_state()
head = self.chain.get_canonical_head()
finalized_checkpoint = state.finalized_checkpoint
return HelloRequest(
fork_version=state.fork.current_version,
finalized_root=finalized_checkpoint.root,
finalized_epoch=finalized_checkpoint.epoch,
head_root=head.hash_tree_root,
head_slot=head.slot,
)
def _compare_chain_tip_and_finalized_epoch(self,
peer_finalized_epoch: Epoch,
peer_head_slot: Slot) -> None:
checkpoint = self.chain.get_head_state().finalized_checkpoint
head_block = self.chain.get_canonical_head()
peer_has_higher_finalized_epoch = peer_finalized_epoch > checkpoint.epoch
peer_has_equal_finalized_epoch = peer_finalized_epoch == checkpoint.epoch
peer_has_higher_head_slot = peer_head_slot > head_block.slot
if (
peer_has_higher_finalized_epoch or
(peer_has_equal_finalized_epoch and peer_has_higher_head_slot)
):
# TODO: kickoff syncing process with this peer
self.logger.debug("Peer's chain is ahead of us, start syncing with the peer.")
pass
async def _handle_hello(self, stream: INetStream) -> None:
# TODO: Find out when we should respond the `ResponseCode`
# other than `ResponseCode.SUCCESS`.
peer_id = stream.mplex_conn.peer_id
self.logger.debug("Waiting for hello from the other side")
try:
hello_other_side = await read_req(stream, HelloRequest)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
await self.disconnect_peer(peer_id)
return
self.logger.debug("Received the hello message %s", hello_other_side)
try:
await self._validate_hello_req(hello_other_side)
except ValidationError as error:
self.logger.info(
"Handshake failed: hello message %s is invalid: %s",
hello_other_side,
str(error)
)
await stream.reset()
await self.say_goodbye(peer_id, GoodbyeReasonCode.IRRELEVANT_NETWORK)
await self.disconnect_peer(peer_id)
return
hello_mine = self._make_hello_packet()
self.logger.debug("Sending our hello message %s", hello_mine)
try:
await write_resp(stream, hello_mine, ResponseCode.SUCCESS)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info(
"Handshake failed: failed to write message %s",
hello_mine,
)
await self.disconnect_peer(peer_id)
return
if peer_id not in self.handshaked_peers:
peer = Peer.from_hello_request(self, peer_id, hello_other_side)
self.handshaked_peers.add(peer)
self.logger.debug(
"Handshake from %s is finished. Added to the `handshake_peers`",
peer_id,
)
# Check if we are behind the peer
self._compare_chain_tip_and_finalized_epoch(
hello_other_side.finalized_epoch,
hello_other_side.head_slot,
)
await stream.close()
async def say_hello(self, peer_id: ID) -> None:
hello_mine = self._make_hello_packet()
self.logger.debug(
"Opening new stream to peer=%s with protocols=%s",
peer_id,
[REQ_RESP_HELLO_SSZ],
)
stream = await self.host.new_stream(peer_id, [REQ_RESP_HELLO_SSZ])
self.logger.debug("Sending our hello message %s", hello_mine)
try:
await write_req(stream, hello_mine)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
await self.disconnect_peer(peer_id)
error_msg = f"fail to write request={hello_mine}"
self.logger.info("Handshake failed: %s", error_msg)
raise HandshakeFailure(error_msg)
self.logger.debug("Waiting for hello from the other side")
try:
resp_code, hello_other_side = await read_resp(stream, HelloRequest)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
await self.disconnect_peer(peer_id)
self.logger.info("Handshake failed: fail to read the response")
raise HandshakeFailure("fail to read the response")
self.logger.debug(
"Received the hello message %s, resp_code=%s",
hello_other_side,
resp_code,
)
# TODO: Handle the case when `resp_code` is not success.
if resp_code != ResponseCode.SUCCESS:
# TODO: Do something according to the `ResponseCode`
error_msg = (
"resp_code != ResponseCode.SUCCESS, "
f"resp_code={resp_code}, error_msg={hello_other_side}"
)
self.logger.info("Handshake failed: %s", error_msg)
await stream.reset()
await self.disconnect_peer(peer_id)
raise HandshakeFailure(error_msg)
hello_other_side = cast(HelloRequest, hello_other_side)
try:
await self._validate_hello_req(hello_other_side)
except ValidationError as error:
error_msg = f"hello message {hello_other_side} is invalid: {str(error)}"
self.logger.info(
"Handshake failed: %s. Disconnecting %s",
error_msg,
peer_id,
)
await stream.reset()
await self.say_goodbye(peer_id, GoodbyeReasonCode.IRRELEVANT_NETWORK)
await self.disconnect_peer(peer_id)
raise HandshakeFailure(error_msg) from error
if peer_id not in self.handshaked_peers:
peer = Peer.from_hello_request(self, peer_id, hello_other_side)
self.handshaked_peers.add(peer)
self.logger.debug(
"Handshake to peer=%s is finished. Added to the `handshake_peers`",
peer_id,
)
# Check if we are behind the peer
self._compare_chain_tip_and_finalized_epoch(
hello_other_side.finalized_epoch,
hello_other_side.head_slot,
)
await stream.close()
async def _handle_goodbye(self, stream: INetStream) -> None:
peer_id = stream.mplex_conn.peer_id
self.logger.debug("Waiting for goodbye from %s", peer_id)
try:
goodbye = await read_req(stream, Goodbye)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
self.logger.debug("Received the goodbye message %s", goodbye)
if not has_error:
await stream.close()
await self.disconnect_peer(peer_id)
async def say_goodbye(self, peer_id: ID, reason: GoodbyeReasonCode) -> None:
goodbye = Goodbye(reason)
self.logger.debug(
"Opening new stream to peer=%s with protocols=%s",
peer_id,
[REQ_RESP_GOODBYE_SSZ],
)
stream = await self.host.new_stream(peer_id, [REQ_RESP_GOODBYE_SSZ])
self.logger.debug("Sending our goodbye message %s", goodbye)
try:
await write_req(stream, goodbye)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
if not has_error:
await stream.close()
await self.disconnect_peer(peer_id)
@to_tuple
def _get_blocks_from_canonical_chain_by_slot(
self,
slot_of_requested_blocks: Sequence[Slot],
) -> Iterable[BaseBeaconBlock]:
# If peer's head block is on our canonical chain,
# start getting the requested blocks by slots.
for slot in slot_of_requested_blocks:
try:
block = self.chain.get_canonical_block_by_slot(slot)
except BlockNotFound:
pass
else:
yield block
@to_tuple
def _get_blocks_from_fork_chain_by_root(
self,
start_slot: Slot,
peer_head_block: BaseBeaconBlock,
slot_of_requested_blocks: Sequence[Slot],
) -> Iterable[BaseBeaconBlock]:
# Peer's head block is on a fork chain,
# start getting the requested blocks by
# traversing the history from the head.
# `slot_of_requested_blocks` starts with earliest slot
# and end with most recent slot, so we start traversing
# from the most recent slot.
cur_index = len(slot_of_requested_blocks) - 1
block = peer_head_block
if block.slot == slot_of_requested_blocks[cur_index]:
yield block
cur_index -= 1
while block.slot > start_slot and cur_index >= 0:
try:
block = self.chain.get_block_by_root(block.parent_root)
except (BlockNotFound, ValidationError):
# This should not happen as we only persist block if its
# ancestors are also in the database.
break
else:
while block.slot < slot_of_requested_blocks[cur_index]:
if cur_index > 0:
cur_index -= 1
else:
break
if block.slot == slot_of_requested_blocks[cur_index]:
yield block
def _validate_start_slot(self, start_slot: Slot) -> None:
config = self.chain.get_state_machine().config
state = self.chain.get_head_state()
finalized_epoch_start_slot = compute_start_slot_of_epoch(
epoch=state.finalized_checkpoint.epoch,
slots_per_epoch=config.SLOTS_PER_EPOCH,
)
if start_slot < finalized_epoch_start_slot:
raise ValidationError(
f"`start_slot`({start_slot}) lower than our"
f" latest finalized slot({finalized_epoch_start_slot})"
)
def _get_requested_beacon_blocks(
self,
beacon_blocks_request: BeaconBlocksRequest,
requested_head_block: BaseBeaconBlock,
) -> Tuple[BaseBeaconBlock, ...]:
slot_of_requested_blocks = tuple(
beacon_blocks_request.start_slot + i * beacon_blocks_request.step
for i in range(beacon_blocks_request.count)
)
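        # e.g. start_slot=3, count=4, step=2 requests slots (3, 5, 7, 9); slots
        # beyond the requested head block are filtered out just below.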
self.logger.info("slot_of_requested_blocks: %s", slot_of_requested_blocks)
slot_of_requested_blocks = tuple(
filter(lambda slot: slot <= requested_head_block.slot, slot_of_requested_blocks)
)
if len(slot_of_requested_blocks) == 0:
return tuple()
# We have the peer's head block in our database,
# next check if the head block is on our canonical chain.
try:
canonical_block_at_slot = self.chain.get_canonical_block_by_slot(
requested_head_block.slot
)
block_match = canonical_block_at_slot == requested_head_block
except BlockNotFound:
            # `canonical_block_at_slot` is unbound when the lookup raises, so it
            # cannot be included in this log message.
            self.logger.debug(
                "The requested head block is not on our canonical chain: "
                "requested_head_block: %s",
                requested_head_block,
            )
block_match = False
finally:
if block_match:
# Peer's head block is on our canonical chain
return self._get_blocks_from_canonical_chain_by_slot(
slot_of_requested_blocks
)
else:
# Peer's head block is not on our canonical chain
# Validate `start_slot` is greater than our latest finalized slot
self._validate_start_slot(beacon_blocks_request.start_slot)
return self._get_blocks_from_fork_chain_by_root(
beacon_blocks_request.start_slot,
requested_head_block,
slot_of_requested_blocks,
)
async def _handle_beacon_blocks(self, stream: INetStream) -> None:
peer_id = stream.mplex_conn.peer_id
if peer_id not in self.handshaked_peers:
self.logger.info(
"Processing beacon blocks request failed: not handshaked with peer=%s yet",
peer_id,
)
await stream.reset()
return
self.logger.debug("Waiting for beacon blocks request from the other side")
try:
beacon_blocks_request = await read_req(stream, BeaconBlocksRequest)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
return
self.logger.debug("Received the beacon blocks request message %s", beacon_blocks_request)
try:
requested_head_block = self.chain.get_block_by_hash_tree_root(
beacon_blocks_request.head_block_root
)
except (BlockNotFound, ValidationError) as error:
self.logger.info("Sending empty blocks, reason: %s", error)
# We don't have the chain data peer is requesting
requested_beacon_blocks: Tuple[BaseBeaconBlock, ...] = tuple()
else:
            # Reject the request if the specified head block is older than the requested start slot
if requested_head_block.slot < beacon_blocks_request.start_slot:
reason = (
f"Invalid request: head block slot({requested_head_block.slot})"
f" lower than `start_slot`({beacon_blocks_request.start_slot})"
)
try:
await write_resp(stream, reason, ResponseCode.INVALID_REQUEST)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info(
"Processing beacon blocks request failed: failed to write message %s",
reason,
)
return
await stream.close()
return
else:
try:
requested_beacon_blocks = self._get_requested_beacon_blocks(
beacon_blocks_request, requested_head_block
)
except ValidationError as val_error:
reason = "Invalid request: " + str(val_error)
try:
await write_resp(stream, reason, ResponseCode.INVALID_REQUEST)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info(
"Processing beacon blocks request failed: "
"failed to write message %s",
reason,
)
return
await stream.close()
return
# TODO: Should it be a successful response if peer is requesting
# blocks on a fork we don't have data for?
beacon_blocks_response = BeaconBlocksResponse(blocks=requested_beacon_blocks)
self.logger.debug("Sending beacon blocks response %s", beacon_blocks_response)
try:
await write_resp(stream, beacon_blocks_response, ResponseCode.SUCCESS)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info(
"Processing beacon blocks request failed: failed to write message %s",
beacon_blocks_response,
)
return
self.logger.debug(
"Processing beacon blocks request from %s is finished",
peer_id,
)
await stream.close()
async def request_beacon_blocks(self,
peer_id: ID,
head_block_root: HashTreeRoot,
start_slot: Slot,
count: int,
step: int) -> Tuple[BaseBeaconBlock, ...]:
if peer_id not in self.handshaked_peers:
error_msg = f"not handshaked with peer={peer_id} yet"
self.logger.info("Request beacon block failed: %s", error_msg)
raise RequestFailure(error_msg)
beacon_blocks_request = BeaconBlocksRequest(
head_block_root=head_block_root,
start_slot=start_slot,
count=count,
step=step,
)
self.logger.debug(
"Opening new stream to peer=%s with protocols=%s",
peer_id,
[REQ_RESP_BEACON_BLOCKS_SSZ],
)
stream = await self.host.new_stream(peer_id, [REQ_RESP_BEACON_BLOCKS_SSZ])
self.logger.debug("Sending beacon blocks request %s", beacon_blocks_request)
try:
await write_req(stream, beacon_blocks_request)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
error_msg = f"fail to write request={beacon_blocks_request}"
self.logger.info("Request beacon blocks failed: %s", error_msg)
raise RequestFailure(error_msg)
self.logger.debug("Waiting for beacon blocks response")
try:
resp_code, beacon_blocks_response = await read_resp(stream, BeaconBlocksResponse)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info("Request beacon blocks failed: fail to read the response")
raise RequestFailure("fail to read the response")
self.logger.debug(
"Received beacon blocks response %s, resp_code=%s",
beacon_blocks_response,
resp_code,
)
if resp_code != ResponseCode.SUCCESS:
error_msg = (
"resp_code != ResponseCode.SUCCESS, "
f"resp_code={resp_code}, error_msg={beacon_blocks_response}"
)
self.logger.info("Request beacon blocks failed: %s", error_msg)
await stream.reset()
raise RequestFailure(error_msg)
await stream.close()
beacon_blocks_response = cast(BeaconBlocksResponse, beacon_blocks_response)
return beacon_blocks_response.blocks
async def _handle_recent_beacon_blocks(self, stream: INetStream) -> None:
peer_id = stream.mplex_conn.peer_id
if peer_id not in self.handshaked_peers:
self.logger.info(
"Processing recent beacon blocks request failed: not handshaked with peer=%s yet",
peer_id,
)
await stream.reset()
return
self.logger.debug("Waiting for recent beacon blocks request from the other side")
try:
recent_beacon_blocks_request = await read_req(stream, RecentBeaconBlocksRequest)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
return
self.logger.debug(
"Received the recent beacon blocks request message %s",
recent_beacon_blocks_request,
)
recent_beacon_blocks = []
for block_root in recent_beacon_blocks_request.block_roots:
try:
block = self.chain.get_block_by_hash_tree_root(block_root)
except (BlockNotFound, ValidationError):
pass
else:
recent_beacon_blocks.append(block)
recent_beacon_blocks_response = RecentBeaconBlocksResponse(blocks=recent_beacon_blocks)
self.logger.debug("Sending recent beacon blocks response %s", recent_beacon_blocks_response)
try:
await write_resp(stream, recent_beacon_blocks_response, ResponseCode.SUCCESS)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info(
"Processing recent beacon blocks request failed: failed to write message %s",
recent_beacon_blocks_response,
)
return
self.logger.debug(
"Processing recent beacon blocks request from %s is finished",
peer_id,
)
await stream.close()
async def request_recent_beacon_blocks(
self,
peer_id: ID,
block_roots: Sequence[HashTreeRoot]) -> Tuple[BaseBeaconBlock, ...]:
if peer_id not in self.handshaked_peers:
error_msg = f"not handshaked with peer={peer_id} yet"
self.logger.info("Request recent beacon block failed: %s", error_msg)
raise RequestFailure(error_msg)
recent_beacon_blocks_request = RecentBeaconBlocksRequest(block_roots=block_roots)
self.logger.debug(
"Opening new stream to peer=%s with protocols=%s",
peer_id,
[REQ_RESP_RECENT_BEACON_BLOCKS_SSZ],
)
stream = await self.host.new_stream(peer_id, [REQ_RESP_RECENT_BEACON_BLOCKS_SSZ])
self.logger.debug("Sending recent beacon blocks request %s", recent_beacon_blocks_request)
try:
await write_req(stream, recent_beacon_blocks_request)
has_error = False
except (WriteMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, WriteMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
error_msg = f"fail to write request={recent_beacon_blocks_request}"
self.logger.info("Request recent beacon blocks failed: %s", error_msg)
raise RequestFailure(error_msg)
self.logger.debug("Waiting for recent beacon blocks response")
try:
resp_code, recent_beacon_blocks_response = await read_resp(
stream,
RecentBeaconBlocksResponse,
)
has_error = False
except (ReadMessageFailure, MplexStreamEOF, MplexStreamReset) as error:
has_error = True
if isinstance(error, ReadMessageFailure):
await stream.reset()
elif isinstance(error, MplexStreamEOF):
await stream.close()
finally:
if has_error:
self.logger.info("Request recent beacon blocks failed: fail to read the response")
raise RequestFailure("fail to read the response")
self.logger.debug(
"Received recent beacon blocks response %s, resp_code=%s",
recent_beacon_blocks_response,
resp_code,
)
if resp_code != ResponseCode.SUCCESS:
error_msg = (
"resp_code != ResponseCode.SUCCESS, "
f"resp_code={resp_code}, error_msg={recent_beacon_blocks_response}"
)
self.logger.info("Request recent beacon blocks failed: %s", error_msg)
await stream.reset()
raise RequestFailure(error_msg)
await stream.close()
recent_beacon_blocks_response = cast(
RecentBeaconBlocksResponse,
recent_beacon_blocks_response,
)
return recent_beacon_blocks_response.blocks
| 37.102496 | 100 | 0.613226 | [
"MIT"
] | pipermerriam/trinity | trinity/protocol/bcc_libp2p/node.py | 41,629 | Python |
import re
whitespace_re = re.compile(r'\s+')
def pare(text, size, etc='...'):
    '''Pare text down to at most `size` characters, avoiding cutting a word in
    half where possible, and append `etc` if the text was shortened.'''
size = int(size)
text = text.strip()
if len(text)>size:
# strip the last word or not
to_be_stripped = not whitespace_re.findall(text[size-1:size+2])
text = text[:size]
if to_be_stripped:
half = size//2
last = None
for mo in whitespace_re.finditer(text[half:]):
last = mo
if last is not None:
text = text[:half+last.start()+1]
return text.rstrip() + etc
else:
return text
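# A rough usage sketch (not part of the original module); the sample sentence
# and size are invented for illustration:
#     pare('The quick brown fox jumps', 17)
# should yield 'The quick brown...', because the partially cut word "fox" is
# stripped back to the preceding whitespace before `etc` is appended.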
| 24.357143 | 71 | 0.541056 | [
"MIT"
] | SlivTime/iktomi | iktomi/utils/text.py | 682 | Python |
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'nomadgram.users'
verbose_name = "Users"
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
from .signals import user_signed_up
| 20.466667 | 43 | 0.62215 | [
"MIT"
] | JeewhanR/Nomadgram | nomadgram/users/apps.py | 307 | Python |
class BotError(Exception):
"""Base bot error."""
class BotAppError(Exception):
"""Bot App Error."""
class BotApiError(Exception):
"""Bot API Error."""
| 15.181818 | 29 | 0.634731 | [
"MIT"
] | priver/gopubbot | gopubbot/bot/exceptions.py | 167 | Python |
"""
Auto-generated class for JobResult
"""
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from . import client_support
class JobResult(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(data, id, level, name, startTime, state, stderr, stdout):
"""
:type data: str
:type id: str
:type level: int
:type name: EnumJobResultName
:type startTime: int
:type state: EnumJobResultState
:type stderr: str
:type stdout: str
:rtype: JobResult
"""
return JobResult(
data=data,
id=id,
level=level,
name=name,
startTime=startTime,
state=state,
stderr=stderr,
stdout=stdout,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'JobResult'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'data'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.data = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'level'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.level = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultName]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'startTime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.startTime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'state'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultState]
try:
self.state = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stderr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stderr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stdout'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stdout = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 35.609589 | 107 | 0.601462 | [
"Apache-2.0"
] | 5l1v3r1/0-orchestrator | pyclient/zeroos/orchestrator/client/JobResult.py | 5,199 | Python |
from io import BufferedIOBase
import os
import sys
if sys.platform == 'win32':
import _winapi
import msvcrt
class WindowsPipe:
def __init__(self, experiment_id: str):
self.path: str = r'\\.\pipe\nni-' + experiment_id
self.file = None
self._handle = _winapi.CreateNamedPipe(
self.path,
_winapi.PIPE_ACCESS_DUPLEX,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT,
1,
8192,
8192,
0,
_winapi.NULL
)
def connect(self) -> BufferedIOBase:
_winapi.ConnectNamedPipe(self._handle, _winapi.NULL)
fd = msvcrt.open_osfhandle(self._handle, 0)
self.file = os.fdopen(fd, 'w+b')
return self.file
def close(self) -> None:
if self.file is not None:
self.file.close()
Pipe = WindowsPipe
else:
import socket
from . import management
class UnixPipe:
def __init__(self, experiment_id: str):
self.path: str = str(management.create_experiment_directory(experiment_id) / 'dispatcher-pipe')
self.file = None
self._socket = socket.socket(socket.AF_UNIX)
self._socket.bind(self.path)
self._socket.listen(1) # only accepts one connection
def connect(self) -> BufferedIOBase:
conn, _ = self._socket.accept()
self.file = conn.makefile('rwb')
return self.file
def close(self) -> None:
if self.file is not None:
self.file.close()
self._socket.close()
os.unlink(self.path)
Pipe = UnixPipe
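# Hedged usage sketch (not part of the original module); the experiment id is a
# made-up placeholder:
#     pipe = Pipe('example_experiment')
#     conn = pipe.connect()  # blocks until the dispatcher side connects
#     conn.write(b'...'); conn.flush()
#     pipe.close()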
| 27.796875 | 107 | 0.554244 | [
"MIT"
] | 5l1v3r1/nni | nni/experiment/pipe.py | 1,779 | Python |
import os
import scipy.misc
import torch
import numpy as np
import torch.optim as optim
import config
import data_loader
import d_net
import loss_funs
import g_net
dtype = config.dtype
def save_samples(generated_images, iteration, prefix):
generated_images = generated_images.data.cpu().numpy()
num_images, channels, cell_h, cell_w = generated_images.shape
ncols = int(np.sqrt(num_images))
nrows = int(np.math.floor(num_images / float(ncols)))
result = np.zeros(
(cell_h * nrows, cell_w * ncols, channels), dtype=generated_images.dtype
)
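    # Paste each CHW image into its (row i, column j) cell of the HWC grid;
    # any images beyond nrows * ncols are dropped.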
for i in range(0, nrows):
for j in range(0, ncols):
result[
i * cell_h : (i + 1) * cell_h, j * cell_w : (j + 1) * cell_w, :
] = generated_images[i * ncols + j].transpose(1, 2, 0)
grid = result
if not os.path.exists("output"):
os.makedirs("output")
scipy.misc.imsave("output/{}_{:05d}.jpg".format(prefix, iteration), grid)
def main():
loss_fp = open("losses.csv", "w")
video_d_net = d_net.DiscriminatorModel(
kernel_sizes_list=d_net.SCALE_KERNEL_SIZES_D,
conv_layer_fms_list=d_net.SCALE_CONV_FMS_D,
scale_fc_layer_sizes_list=d_net.SCALE_FC_LAYER_SIZES_D,
)
video_d_net.type(dtype)
video_g_net = g_net.VideoGANGenerator()
video_g_net.type(dtype)
video_d_optimizer = optim.SGD(video_d_net.parameters(), lr=0.0001)
video_g_optimizer = optim.SGD(video_g_net.parameters(), lr=0.0001)
# Load Pacman dataset
max_size = len(os.listdir("train"))
pacman_dataloader = data_loader.DataLoader("train", max_size, 16, 32, 32, 4)
count = 0
for i in range(1, 5000):
clips_x, clips_y = pacman_dataloader.get_train_batch()
clips_x = torch.tensor(np.rollaxis(clips_x, 3, 1)).type(dtype)
clips_y = torch.tensor(np.rollaxis(clips_y, 3, 1)).type(dtype)
video_d_optimizer.zero_grad()
video_g_optimizer.zero_grad()
# batch_size x noise_size x 1 x 1
batch_size = 16
# WGAN loss
# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
video_images = video_g_net(clips_x)
# TESTING: Vanilla Video Gan
video_d_loss_real = (video_d_net(clips_y) - 1).pow(2).mean()
video_d_loss_fake = (video_d_net(video_images)).pow(2).mean()
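        # Note: despite the WGAN reference above, these squared terms are
        # least-squares-GAN style discriminator objectives.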
# Fake batch
labels = torch.zeros(batch_size, 4).t().unsqueeze(2).type(dtype)
video_d_loss_fake = loss_funs.adv_loss(
video_d_net(video_images), labels
) # TODO: Validate if it's right.
video_d_optimizer.zero_grad()
video_d_loss_fake.backward()
video_d_optimizer.step()
# Real batch
labels = torch.ones(batch_size, 4).t().unsqueeze(2).type(dtype)
video_d_loss_real = loss_funs.adv_loss(
video_d_net(clips_y), labels
) # TODO: Validate if it's right.
video_d_optimizer.zero_grad()
video_d_loss_real.backward()
video_d_optimizer.step()
# batch_size x noise_size x 1 x 1
batch_size = 16
# print('G_Time:', end - start)
# TESTING: Vanilla Video Gan
video_images = video_g_net(clips_x)
d_preds = video_d_net(video_images).type(
dtype
) # TODO: Make sure this is working.
gt_frames = clips_y.type(dtype) # TODO: make clips_y at different scales.
gen_frames = video_images.type(
dtype
) # TODO: make the generated frames multi scale.
video_g_loss = loss_funs.combined_loss(gen_frames, gt_frames, d_preds)
video_g_loss.backward()
video_g_optimizer.step()
if count % 20 == 0:
save_samples(clips_y, count, "video_real")
save_samples(video_images, count, "video_fake")
out_str = "{}, {}, {}, {}".format(
count, video_d_loss_real, video_d_loss_fake, video_g_loss
)
print(out_str)
loss_fp.write(out_str)
loss_fp.write("\n")
loss_fp.flush()
torch.save(video_g_net.state_dict(), "generator_net.pth.tmp")
count += 1
loss_fp.close()
# Final Generator save.
torch.save(video_g_net.state_dict(), "generator_net.pth")
if __name__ == "__main__":
main()
| 31.620438 | 91 | 0.635042 | [
"MIT"
] | mswang12/VideoGAN | process.py | 4,332 | Python |
# coding: ascii
"""Python 2.x/3.x compatibility tools"""
import sys
__all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_',
'unicode_', 'raw_input_', 'as_bytes', 'as_unicode',
'bytes_', 'imap_', 'PY_MAJOR_VERSION']
PY_MAJOR_VERSION = sys.version_info[0]
def geterror():
return sys.exc_info()[1]
# Python 3
if PY_MAJOR_VERSION >= 3:
long_ = int
xrange_ = range
from io import StringIO
from io import BytesIO
unichr_ = chr
unicode_ = str
bytes_ = bytes
raw_input_ = input
imap_ = map
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
# e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x
# as_bytes("a\x01\b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
def as_bytes(string):
""" '<binary literal>' => b'<binary literal>' """
return string.encode('latin-1', 'strict')
def as_unicode(rstring):
""" r'<Unicode literal>' => '<Unicode literal>' """
return rstring.encode('ascii', 'strict').decode('unicode_escape',
'strict')
# Python 2
else:
long_ = long
xrange_ = xrange
from cStringIO import StringIO
BytesIO = StringIO
unichr_ = unichr
unicode_ = unicode
bytes_ = str
raw_input_ = raw_input
from itertools import imap as imap_
# Represent escaped bytes and strings in a portable way.
#
# as_bytes: Allow a Python 3.x string to represent a bytes object.
# e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x
# as_bytes("a\x01\b") == "a\x01b" # Python 2.x
# as_unicode: Allow a Python "r" string to represent a unicode string.
# e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x
# as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x
def as_bytes(string):
""" '<binary literal>' => '<binary literal>' """
return string
def as_unicode(rstring):
""" r'<Unicode literal>' => u'<Unicode literal>' """
return rstring.decode('unicode_escape', 'strict')
def get_BytesIO():
return BytesIO
def get_StringIO():
return StringIO
def ord_(o):
try:
return ord(o)
except TypeError:
return o
if sys.platform == 'win32':
filesystem_errors = "replace"
elif PY_MAJOR_VERSION >= 3:
filesystem_errors = "surrogateescape"
else:
filesystem_errors = "strict"
def filesystem_encode(u):
fsencoding = sys.getfilesystemencoding()
if fsencoding.lower() in ['ascii', 'ansi_x3.4-1968'] and sys.platform.startswith('linux'):
# Don't believe Linux systems claiming ASCII-only filesystems. In
# practice, arbitrary bytes are allowed, and most things expect UTF-8.
fsencoding = 'utf-8'
return u.encode(fsencoding, filesystem_errors)
| 31.203883 | 95 | 0.593964 | [
"MIT"
] | JE-Chen/je_old_repo | Python_MiniGame_Fighter/venv/Lib/site-packages/pygame/compat.py | 3,214 | Python |
from .fhirbase import fhirbase
class CapabilityStatement(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
resourceType: This is a CapabilityStatement resource
url: An absolute URI that is used to identify this capability
statement when it is referenced in a specification, model, design or
an instance. This SHALL be a URL, SHOULD be globally unique, and
SHOULD be an address at which this capability statement is (or will
be) published. The URL SHOULD include the major version of the
capability statement. For more information see [Technical and Business
Versions](resource.html#versions).
version: The identifier that is used to identify this version of the
capability statement when it is referenced in a specification, model,
design or instance. This is an arbitrary value managed by the
capability statement author and is not expected to be globally unique.
For example, it might be a timestamp (e.g. yyyymmdd) if a managed
version is not available. There is also no expectation that versions
can be placed in a lexicographical sequence.
name: A natural language name identifying the capability statement.
This name should be usable as an identifier for the module by machine
processing applications such as code generation.
title: A short, descriptive, user-friendly title for the capability
statement.
status: The status of this capability statement. Enables tracking the
life-cycle of the content.
experimental: A boolean value to indicate that this capability
statement is authored for testing purposes (or
education/evaluation/marketing), and is not intended to be used for
genuine usage.
date: The date (and optionally time) when the capability statement
was published. The date must change if and when the business version
changes and it must change if the status code changes. In addition, it
should change when the substantive content of the capability statement
changes.
publisher: The name of the individual or organization that published
the capability statement.
contact: Contact details to assist a user in finding and communicating
with the publisher.
description: A free text natural language description of the
capability statement from a consumer's perspective. Typically, this is
used when the capability statement describes a desired rather than an
actual solution, for example as a formal expression of requirements as
part of an RFP.
useContext: The content was developed with a focus and intent of
supporting the contexts that are listed. These terms may be used to
assist with indexing and searching for appropriate capability
statement instances.
jurisdiction: A legal or geographic region in which the capability
statement is intended to be used.
        purpose: Explanation of why this capability statement is needed and
why it has been designed as it has.
copyright: A copyright statement relating to the capability statement
and/or its contents. Copyright statements are generally legal
restrictions on the use and publishing of the capability statement.
kind: The way that this statement is intended to be used, to describe
an actual running instance of software, a particular product (kind not
instance of software) or a class of implementation (e.g. a desired
purchase).
instantiates: Reference to a canonical URL of another
CapabilityStatement that this software implements or uses. This
capability statement is a published API description that corresponds
to a business service. The rest of the capability statement does not
need to repeat the details of the referenced resource, but can do so.
software: Software that is covered by this capability statement. It
is used when the capability statement describes the capabilities of a
particular software version, independent of an installation.
implementation: Identifies a specific implementation instance that is
described by the capability statement - i.e. a particular
installation, rather than the capabilities of a software program.
fhirVersion: The version of the FHIR specification on which this
capability statement is based.
acceptUnknown: A code that indicates whether the application accepts
unknown elements or extensions when reading resources.
format: A list of the formats supported by this implementation using
their content types.
patchFormat: A list of the patch formats supported by this
implementation using their content types.
implementationGuide: A list of implementation guides that the server
does (or should) support in their entirety.
profile: A list of profiles that represent different use cases
supported by the system. For a server, "supported by the system" means
the system hosts/produces a set of resources that are conformant to a
particular profile, and allows clients that use its services to search
using this profile and to find appropriate data. For a client, it
means the system will search by this profile and process data
according to the guidance implicit in the profile. See further
discussion in [Using Profiles](profiling.html#profile-uses).
rest: A definition of the restful capabilities of the solution, if
any.
messaging: A description of the messaging capabilities of the
solution.
document: A document definition.
"""
__name__ = 'CapabilityStatement'
def __init__(self, dict_values=None):
self.resourceType = 'CapabilityStatement'
# type: str
# possible values: CapabilityStatement
self.url = None
# type: str
self.version = None
# type: str
self.name = None
# type: str
self.title = None
# type: str
self.status = None
# type: str
# possible values: draft, active, retired, unknown
self.experimental = None
# type: bool
self.date = None
# type: str
self.publisher = None
# type: str
self.contact = None
# type: list
# reference to ContactDetail
self.description = None
# type: str
self.useContext = None
# type: list
# reference to UsageContext
self.jurisdiction = None
# type: list
# reference to CodeableConcept
self.purpose = None
# type: str
self.copyright = None
# type: str
self.kind = None
# type: str
# possible values: instance, capability, requirements
self.instantiates = None
# type: list
self.software = None
# reference to CapabilityStatement_Software
self.implementation = None
# reference to CapabilityStatement_Implementation
self.fhirVersion = None
# type: str
self.acceptUnknown = None
# type: str
# possible values: no, extensions, elements, both
self.format = None
# type: list
self.patchFormat = None
# type: list
self.implementationGuide = None
# type: list
self.profile = None
# type: list
# reference to Reference: identifier
self.rest = None
# type: list
# reference to CapabilityStatement_Rest
self.messaging = None
# type: list
# reference to CapabilityStatement_Messaging
self.document = None
# type: list
# reference to CapabilityStatement_Document
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'active', 'retired', 'unknown']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, active, retired, unknown'))
if self.kind is not None:
for value in self.kind:
if value is not None and value.lower() not in [
'instance', 'capability', 'requirements']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'instance, capability, requirements'))
if self.acceptUnknown is not None:
for value in self.acceptUnknown:
if value is not None and value.lower() not in [
'no', 'extensions', 'elements', 'both']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'no, extensions, elements, both'))
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Rest',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'rest'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'contact'},
{'parent_entity': 'CapabilityStatement_Software',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'software'},
{'parent_entity': 'CapabilityStatement_Implementation',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'implementation'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement',
'child_variable': 'profile'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'jurisdiction'},
{'parent_entity': 'CapabilityStatement_Document',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'document'},
{'parent_entity': 'CapabilityStatement_Messaging',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'messaging'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement',
'child_variable': 'useContext'},
]
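# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated module): one way the class above
# might be populated.  It assumes fhirbase.set_attributes() copies matching
# dict keys onto the instance (that is how the constructor delegates to it);
# the enumerated fields (status, kind, acceptUnknown) are left out so that
# assert_type() has nothing to validate in this illustration.
def _example_capability_statement():
    statement = CapabilityStatement({
        'url': 'http://example.org/fhir/CapabilityStatement/demo',
        'name': 'DemoServerCapabilities',
        'fhirVersion': '3.0.1',
    })
    for link in statement.get_relationships():
        print(link['child_variable'], '<-', link['parent_entity'])
    return statement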
class CapabilityStatement_Software(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: Name software is known by.
version: The version identifier for the software covered by this
statement.
releaseDate: Date this version of the software was released.
"""
__name__ = 'CapabilityStatement_Software'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.version = None
# type: str
self.releaseDate = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Implementation(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
description: Information about the specific installation that this
capability statement relates to.
url: An absolute base URL for the implementation. This forms the base
for REST interfaces as well as the mailbox and document interfaces.
"""
__name__ = 'CapabilityStatement_Implementation'
def __init__(self, dict_values=None):
self.description = None
# type: str
self.url = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Rest(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Identifies whether this portion of the statement is describing
the ability to initiate or receive restful operations.
documentation: Information about the system's restful capabilities
that apply across all applications, such as security.
security: Information about security implementation from an interface
perspective - what a client needs to know.
resource: A specification of the restful capabilities of the solution
for a specific resource type.
interaction: A specification of restful operations supported by the
system.
searchParam: Search parameters that are supported for searching all
resources for implementations to support and/or make use of - either
references to ones defined in the specification, or additional ones
defined for/by the implementation.
operation: Definition of an operation or a named query together with
its parameters and their meaning and type.
compartment: An absolute URI which is a reference to the definition of
a compartment that the system supports. The reference is to a
CompartmentDefinition resource by its canonical URL .
"""
__name__ = 'CapabilityStatement_Rest'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: client, server
self.documentation = None
# type: str
self.security = None
# reference to CapabilityStatement_Security
self.resource = None
# type: list
# reference to CapabilityStatement_Resource
self.interaction = None
# type: list
# reference to CapabilityStatement_Interaction1
self.searchParam = None
# type: list
# reference to CapabilityStatement_SearchParam
self.operation = None
# type: list
# reference to CapabilityStatement_Operation
self.compartment = None
# type: list
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'client', 'server']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'client, server'))
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_SearchParam',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'searchParam'},
{'parent_entity': 'CapabilityStatement_Interaction1',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'interaction'},
{'parent_entity': 'CapabilityStatement_Security',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'security'},
{'parent_entity': 'CapabilityStatement_Resource',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'resource'},
{'parent_entity': 'CapabilityStatement_Operation',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Rest',
'child_variable': 'operation'},
]
class CapabilityStatement_Security(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
cors: Server adds CORS headers when responding to requests - this
enables javascript applications to use the server.
service: Types of security services that are supported/required by the
system.
description: General description of how security works.
certificate: Certificates associated with security profiles.
"""
__name__ = 'CapabilityStatement_Security'
def __init__(self, dict_values=None):
self.cors = None
# type: bool
self.service = None
# type: list
# reference to CodeableConcept
self.description = None
# type: str
self.certificate = None
# type: list
# reference to CapabilityStatement_Certificate
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Certificate',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Security',
'child_variable': 'certificate'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Security',
'child_variable': 'service'},
]
class CapabilityStatement_Certificate(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: Mime type for a certificate.
blob: Actual certificate.
"""
__name__ = 'CapabilityStatement_Certificate'
def __init__(self, dict_values=None):
self.type = None
# type: str
self.blob = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
class CapabilityStatement_Resource(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
type: A type of resource exposed via the restful interface.
profile: A specification of the profile that describes the solution's
overall support for the resource, including any constraints on
cardinality, bindings, lengths or other limitations. See further
discussion in [Using Profiles](profiling.html#profile-uses).
documentation: Additional information about the resource type used by
the system.
interaction: Identifies a restful operation supported by the solution.
versioning: This field is set to no-version to specify that the system
does not support (server) or use (client) versioning for this resource
type. If this has some other value, the server must at least correctly
track and populate the versionId meta-property on resources. If the
value is 'versioned-update', then the server supports all the
versioning features, including using e-tags for version integrity in
the API.
readHistory: A flag for whether the server is able to return past
versions as part of the vRead operation.
updateCreate: A flag to indicate that the server allows or needs to
allow the client to create new identities on the server (e.g. that is,
the client PUTs to a location where there is no existing resource).
Allowing this operation means that the server allows the client to
create new identities on the server.
conditionalCreate: A flag that indicates that the server supports
conditional create.
conditionalRead: A code that indicates how the server supports
conditional read.
conditionalUpdate: A flag that indicates that the server supports
conditional update.
conditionalDelete: A code that indicates how the server supports
conditional delete.
referencePolicy: A set of flags that defines how references are
supported.
searchInclude: A list of _include values supported by the server.
searchRevInclude: A list of _revinclude (reverse include) values
supported by the server.
searchParam: Search parameters for implementations to support and/or
make use of - either references to ones defined in the specification,
or additional ones defined for/by the implementation.
"""
__name__ = 'CapabilityStatement_Resource'
def __init__(self, dict_values=None):
self.type = None
# type: str
self.profile = None
# reference to Reference: identifier
self.documentation = None
# type: str
self.interaction = None
# type: list
# reference to CapabilityStatement_Interaction
self.versioning = None
# type: str
# possible values: no-version, versioned, versioned-update
self.readHistory = None
# type: bool
self.updateCreate = None
# type: bool
self.conditionalCreate = None
# type: bool
self.conditionalRead = None
# type: str
# possible values: not-supported, modified-since, not-match,
# full-support
self.conditionalUpdate = None
# type: bool
self.conditionalDelete = None
# type: str
# possible values: not-supported, single, multiple
self.referencePolicy = None
# type: list
# possible values: literal, logical, resolves, enforced, local
self.searchInclude = None
# type: list
self.searchRevInclude = None
# type: list
self.searchParam = None
# type: list
# reference to CapabilityStatement_SearchParam
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.versioning is not None:
for value in self.versioning:
if value is not None and value.lower() not in [
'no-version', 'versioned', 'versioned-update']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'no-version, versioned, versioned-update'))
if self.conditionalRead is not None:
for value in self.conditionalRead:
if value is not None and value.lower() not in [
'not-supported', 'modified-since', 'not-match', 'full-support']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'not-supported, modified-since, not-match, full-support'))
if self.conditionalDelete is not None:
for value in self.conditionalDelete:
if value is not None and value.lower() not in [
'not-supported', 'single', 'multiple']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'not-supported, single, multiple'))
if self.referencePolicy is not None:
for value in self.referencePolicy:
if value is not None and value.lower() not in [
'literal', 'logical', 'resolves', 'enforced', 'local']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'literal, logical, resolves, enforced, local'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'profile'},
{'parent_entity': 'CapabilityStatement_SearchParam',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'searchParam'},
{'parent_entity': 'CapabilityStatement_Interaction',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Resource',
'child_variable': 'interaction'},
]
class CapabilityStatement_Interaction(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: Coded identifier of the operation, supported by the system
resource.
documentation: Guidance specific to the implementation of this
operation, such as 'delete is a logical delete' or 'updates are only
allowed with version id' or 'creates permitted from pre-authorized
certificates only'.
"""
__name__ = 'CapabilityStatement_Interaction'
def __init__(self, dict_values=None):
self.code = None
# type: str
# possible values: read, vread, update, patch, delete,
# history-instance, history-type, create, search-type
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.code is not None:
for value in self.code:
if value is not None and value.lower() not in [
'read', 'vread', 'update', 'patch', 'delete', 'history-instance',
'history-type', 'create', 'search-type']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'read, vread, update, patch, delete, history-instance, '
'history-type, create, search-type'))
class CapabilityStatement_SearchParam(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the search parameter used in the interface.
definition: An absolute URI that is a formal reference to where this
parameter was first defined, so that a client can be confident of the
meaning of the search parameter (a reference to
[[[SearchParameter.url]]]).
type: The type of value a search parameter refers to, and how the
content is interpreted.
documentation: This allows documentation of any distinct behaviors
about how the search parameter is used. For example, text matching
algorithms.
"""
__name__ = 'CapabilityStatement_SearchParam'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.definition = None
# type: str
self.type = None
# type: str
# possible values: number, date, string, token, reference,
# composite, quantity, uri
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.type is not None:
for value in self.type:
if value is not None and value.lower() not in [
'number', 'date', 'string', 'token', 'reference', 'composite',
'quantity', 'uri']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'number, date, string, token, reference, composite, quantity, uri'))
class CapabilityStatement_Interaction1(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of the operation, supported by the system.
documentation: Guidance specific to the implementation of this
operation, such as limitations on the kind of transactions allowed, or
        information about how system-wide search is implemented.
"""
__name__ = 'CapabilityStatement_Interaction1'
def __init__(self, dict_values=None):
self.code = None
# type: str
# possible values: transaction, batch, search-system,
# history-system
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.code is not None:
for value in self.code:
if value is not None and value.lower() not in [
'transaction', 'batch', 'search-system', 'history-system']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'transaction, batch, search-system, history-system'))
class CapabilityStatement_Operation(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
name: The name of the operation or query. For an operation, this is
the name prefixed with $ and used in the URL. For a query, this is
the name used in the _query parameter when the query is called.
definition: Where the formal definition can be found.
"""
__name__ = 'CapabilityStatement_Operation'
def __init__(self, dict_values=None):
self.name = None
# type: str
self.definition = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Operation',
'child_variable': 'definition'},
]
class CapabilityStatement_Messaging(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
endpoint: An endpoint (network accessible address) to which messages
and/or replies are to be sent.
        reliableCache: Length of the receiver's reliable messaging cache in
minutes (if a receiver) or how long the cache length on the receiver
should be (if a sender).
documentation: Documentation about the system's messaging capabilities
for this endpoint not otherwise documented by the capability
statement. For example, the process for becoming an authorized
messaging exchange partner.
supportedMessage: References to message definitions for messages this
system can send or receive.
event: A description of the solution's support for an event at this
end-point.
"""
__name__ = 'CapabilityStatement_Messaging'
def __init__(self, dict_values=None):
self.endpoint = None
# type: list
# reference to CapabilityStatement_Endpoint
self.reliableCache = None
# type: int
self.documentation = None
# type: str
self.supportedMessage = None
# type: list
# reference to CapabilityStatement_SupportedMessage
self.event = None
# type: list
# reference to CapabilityStatement_Event
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CapabilityStatement_Endpoint',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'endpoint'},
{'parent_entity': 'CapabilityStatement_SupportedMessage',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'supportedMessage'},
{'parent_entity': 'CapabilityStatement_Event',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Messaging',
'child_variable': 'event'},
]
class CapabilityStatement_Endpoint(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
protocol: A list of the messaging transport protocol(s) identifiers,
supported by this endpoint.
address: The network address of the end-point. For solutions that do
not use network addresses for routing, it can be just an identifier.
"""
__name__ = 'CapabilityStatement_Endpoint'
def __init__(self, dict_values=None):
self.protocol = None
# reference to Coding
self.address = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Endpoint',
'child_variable': 'protocol'},
]
class CapabilityStatement_SupportedMessage(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: The mode of this event declaration - whether application is
sender or receiver.
definition: Points to a message definition that identifies the
messaging event, message structure, allowed responses, etc.
"""
__name__ = 'CapabilityStatement_SupportedMessage'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: sender, receiver
self.definition = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'sender', 'receiver']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'sender, receiver'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_SupportedMessage',
'child_variable': 'definition'},
]
class CapabilityStatement_Event(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
code: A coded identifier of a supported messaging event.
category: The impact of the content of the message.
mode: The mode of this event declaration - whether an application is a
sender or receiver.
focus: A resource associated with the event. This is the resource
that defines the event.
request: Information about the request for this event.
response: Information about the response for this event.
documentation: Guidance on how this event is handled, such as internal
system trigger points, business rules, etc.
"""
__name__ = 'CapabilityStatement_Event'
def __init__(self, dict_values=None):
self.code = None
# reference to Coding
self.category = None
# type: str
# possible values: Consequence, Currency, Notification
self.mode = None
# type: str
# possible values: sender, receiver
self.focus = None
# type: str
self.request = None
# reference to Reference: identifier
self.response = None
# reference to Reference: identifier
self.documentation = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.category is not None:
for value in self.category:
if value is not None and value.lower() not in [
'consequence', 'currency', 'notification']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'Consequence, Currency, Notification'))
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'sender', 'receiver']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'sender, receiver'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'request'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'response'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'CapabilityStatement_Event',
'child_variable': 'code'},
]
class CapabilityStatement_Document(fhirbase):
"""
A Capability Statement documents a set of capabilities (behaviors) of
a FHIR Server that may be used as a statement of actual server
functionality or a statement of required or desired server
implementation.
Attributes:
mode: Mode of this document declaration - whether an application is a
producer or consumer.
documentation: A description of how the application supports or uses
the specified document profile. For example, when documents are
created, what action is taken with consumed documents, etc.
profile: A constraint on a resource used in the document.
"""
__name__ = 'CapabilityStatement_Document'
def __init__(self, dict_values=None):
self.mode = None
# type: str
# possible values: producer, consumer
self.documentation = None
# type: str
self.profile = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.mode is not None:
for value in self.mode:
if value is not None and value.lower() not in [
'producer', 'consumer']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'producer, consumer'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'CapabilityStatement_Document',
'child_variable': 'profile'},
]
| 36.502551 | 99 | 0.626156 | [
"MIT"
] | Hector-hedb12/Cardea | cardea/fhir/CapabilityStatement.py | 42,927 | Python |
import os
import sys
import argparse
import datetime
import time
import csv
import os.path as osp
import numpy as np
import warnings
import importlib
import pandas as pd
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import torchvision
from datasets import CIFAR10D, CIFAR100D
from utils.utils import AverageMeter, Logger, save_networks, load_networks
from core import train, test, test_robustness
parser = argparse.ArgumentParser("Training")
# dataset
parser.add_argument('--data', type=str, default='./data')
parser.add_argument('--outf', type=str, default='./results')
parser.add_argument('-d', '--dataset', type=str, default='cifar10')
parser.add_argument('--workers', default=8, type=int, help="number of data loading workers (default: 4)")
# optimization
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--lr', type=float, default=0.1, help="learning rate for model")
parser.add_argument('--max-epoch', type=int, default=200)
parser.add_argument('--stepsize', type=int, default=30)
parser.add_argument('--aug', type=str, default='none', help='none, aprs')
# model
parser.add_argument('--model', type=str, default='wider_resnet_28_10')
# misc
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=100)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--eval', action='store_true', help="Eval", default=False)
# parameters for generating adversarial examples
parser.add_argument('--epsilon', '-e', type=float, default=0.0157,
help='maximum perturbation of adversaries (4/255=0.0157)')
parser.add_argument('--alpha', '-a', type=float, default=0.00784,
help='movement multiplier per iteration when generating adversarial examples (2/255=0.00784)')
parser.add_argument('--k', '-k', type=int, default=10,
help='maximum iteration when generating adversarial examples')
parser.add_argument('--perturbation_type', '-p', choices=['linf', 'l2'], default='linf',
help='the type of the perturbation (linf or l2)')
args = parser.parse_args()
options = vars(args)
if not os.path.exists(options['outf']):
os.makedirs(options['outf'])
if not os.path.exists(options['data']):
os.makedirs(options['data'])
sys.stdout = Logger(osp.join(options['outf'], 'logs.txt'))
def main():
torch.manual_seed(options['seed'])
os.environ['CUDA_VISIBLE_DEVICES'] = options['gpu']
use_gpu = torch.cuda.is_available()
if options['use_cpu']: use_gpu = False
options.update({'use_gpu': use_gpu})
if use_gpu:
print("Currently using GPU: {}".format(options['gpu']))
cudnn.benchmark = True
torch.cuda.manual_seed_all(options['seed'])
else:
print("Currently using CPU")
if 'cifar10' == options['dataset']:
Data = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
else:
Data = CIFAR100D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'], _eval=options['eval'])
OODData = CIFAR10D(dataroot=options['data'], batch_size=options['batch_size'], _transforms=options['aug'])
trainloader, testloader, outloader = Data.train_loader, Data.test_loader, OODData.test_loader
num_classes = Data.num_classes
print("Creating model: {}".format(options['model']))
if 'wide_resnet' in options['model']:
print('wide_resnet')
from model.wide_resnet import WideResNet
net = WideResNet(40, num_classes, 2, 0.0)
elif 'allconv' in options['model']:
print('allconv')
from model.allconv import AllConvNet
net = AllConvNet(num_classes)
elif 'densenet' in options['model']:
print('densenet')
from model.densenet import densenet
net = densenet(num_classes=num_classes)
elif 'resnext' in options['model']:
print('resnext29')
from model.resnext import resnext29
net = resnext29(num_classes)
else:
print('resnet18')
from model.resnet import ResNet18
net = ResNet18(num_classes=num_classes)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if use_gpu:
net = nn.DataParallel(net, device_ids=[i for i in range(len(options['gpu'].split(',')))]).cuda()
criterion = criterion.cuda()
file_name = '{}_{}_{}'.format(options['model'], options['dataset'], options['aug'])
if options['eval']:
net, criterion = load_networks(net, options['outf'], file_name, criterion=criterion)
outloaders = Data.out_loaders
results = test(net, criterion, testloader, outloader, epoch=0, **options)
acc = results['ACC']
res = dict()
res['ACC'] = dict()
acc_res = []
for key in Data.out_keys:
results = test_robustness(net, criterion, outloaders[key], epoch=0, label=key, **options)
print('{} (%): {:.3f}\t'.format(key, results['ACC']))
res['ACC'][key] = results['ACC']
acc_res.append(results['ACC'])
print('Mean ACC:', np.mean(acc_res))
print('Mean Error:', 100-np.mean(acc_res))
return
params_list = [{'params': net.parameters()},
{'params': criterion.parameters()}]
optimizer = torch.optim.SGD(params_list, lr=options['lr'], momentum=0.9, nesterov=True, weight_decay=5e-4)
scheduler = lr_scheduler.MultiStepLR(optimizer, gamma=0.2, milestones=[60, 120, 160, 190])
start_time = time.time()
best_acc = 0.0
for epoch in range(options['max_epoch']):
print("==> Epoch {}/{}".format(epoch+1, options['max_epoch']))
train(net, criterion, optimizer, trainloader, epoch=epoch, **options)
if options['eval_freq'] > 0 and (epoch+1) % options['eval_freq'] == 0 or (epoch+1) == options['max_epoch'] or epoch > 160:
print("==> Test")
results = test(net, criterion, testloader, outloader, epoch=epoch, **options)
if best_acc < results['ACC']:
best_acc = results['ACC']
print("Best Acc (%): {:.3f}\t".format(best_acc))
save_networks(net, options['outf'], file_name, criterion=criterion)
scheduler.step()
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
if __name__ == '__main__':
main()
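# Editor's note (added): typical invocations, matching the argparse options
# defined above -- the dataset/model/aug names are examples, not requirements.
#   Train:     python main.py -d cifar10 --model resnet18 --aug aprs --gpu 0
#   Evaluate:  python main.py -d cifar10 --model resnet18 --eval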
| 38.463687 | 135 | 0.659985 | [
"MIT"
] | iCGY96/APR | main.py | 6,885 | Python |
ODD = {1, 3, 5, 7, 9}
def insert_dash(num):
    """Return num as a string with a dash inserted between each pair of adjacent odd digits."""
num = str(num)
dex = {i for i, a in enumerate(num) if int(a) in ODD}
return ''.join(b + '-' if {i, i + 1}.issubset(dex) else b
for i, b in enumerate(num))
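# Editor's sketch (added): quick self-checks of the rule above; the expected
# strings were worked out by hand from the ODD-digit pairing logic.
if __name__ == "__main__":
    assert insert_dash(454793) == "4547-9-3"
    assert insert_dash(1003567) == "1003-567"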
| 25.777778 | 61 | 0.512931 | [
"MIT"
] | akalynych/CodeWars | katas/kyu_7/insert_dashes.py | 232 | Python |
#!/usr/bin/python
import subprocess
import sys
UTM_ZONE = "18G"
DENSIFY_FACTOR = 1000
name = sys.argv[1]
prev_x = ""
prev_y = ""
infile = open(name, "r")
while True:
    line = infile.readline().strip()
    if not line:
        break
    elements = line.split()
    x = elements[0].strip()
    y = elements[1].strip()
    # The running distance restarts at every input vertex; the densified
    # points carry the cumulative along-segment distance.
    distance = 0.0
    if prev_x:
        print(prev_x + " " + prev_y + " " + str(distance))
        diff_x = float(x) - float(prev_x)
        diff_y = float(y) - float(prev_y)
        last_x = prev_x
        last_y = prev_y
        for i in range(1, DENSIFY_FACTOR):
            cur_x = str(float(prev_x) + float(i) * diff_x / float(DENSIFY_FACTOR))
            cur_y = str(float(prev_y) + float(i) * diff_y / float(DENSIFY_FACTOR))
            distance = distance + ((float(cur_x) - float(last_x)) ** 2 + (float(cur_y) - float(last_y)) ** 2) ** 0.5
            last_x = cur_x
            last_y = cur_y
            print(cur_x + " " + cur_y + " " + str(distance))
        distance = distance + ((float(x) - float(last_x)) ** 2 + (float(y) - float(last_y)) ** 2) ** 0.5
    prev_x = x
    prev_y = y
infile.close()
print(prev_x + " " + prev_y + " " + str(distance))
exit()
| 21.822222 | 92 | 0.670061 | [
"MIT"
] | whyjz/CARST | extra/unused/densify_utm_input.py | 982 | Python |
import requests
from bs4 import BeautifulSoup
import jinja2
import re
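# Editor's note (added): this script scrapes a Japanese character wiki page,
# extracts the stat table and skill descriptions, and renders an
# Adventurer(...)/Assist(...) declaration through the jinja2 template below.
# The Japanese string literals are matching keys for the scraped HTML and are
# intentionally left untranslated.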
class Chara:
name = ''
job = ''
hp = 0
mp = 0
str = 0
end = 0
dex = 0
agi = 0
mag = 0
killer = ""
counter_hp = ""
skills = ""
passive_skills = ""
class HtmlParser:
def __init__(self, text):
self.soup = BeautifulSoup(text, 'html.parser')
self.soup_ptr = self.soup.find("div", class_='toc')
def get_next_div(self):
found = False
while not found:
self.soup_ptr = self.soup_ptr.find_next_sibling("div", class_='basic')
if self.soup_ptr.find("table") is not None:
found = True
return self.soup_ptr
def parse_effs(effs_str):
    """Map a Japanese effect description onto the simulator's effect-enum name strings."""
effs = []
if "カウンター率" in effs_str:
effs.append("Ability.counter_rate")
if "ペネトレーション率" in effs_str:
effs.append("Ability.pene_rate")
if "必殺技ゲージ" in effs_str:
effs.append("Ability.energy_bar")
if "クリティカル率" in effs_str:
effs.append("Ability.crit_rate")
if "ガード率" in effs_str:
effs.append("Ability.guard_rate")
if "カウンター発生" in effs_str:
effs.append("SuccessUp.counter")
if "ペネトレーション発生" in effs_str:
effs.append("SuccessUp.pene")
if "クリティカル発生" in effs_str:
effs.append("SuccessUp.crit")
if "ガード発生" in effs_str:
effs.append("SuccessUp.guard")
if "力と魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "力と" in effs_str:
effs.append("Ability.str")
elif "力、魔力" in effs_str:
effs.append("Ability.str")
effs.append("Ability.mag")
elif "魔力" in effs_str:
effs.append("Ability.mag")
elif "の力" in effs_str:
effs.append("Ability.str")
elif "、力" in effs_str:
effs.append("Ability.str")
elif effs_str.startswith("力"):
effs.append("Ability.str")
if "敏捷" in effs_str:
effs.append("Ability.agi")
if "器用" in effs_str:
effs.append("Ability.dex")
if "耐久" in effs_str:
effs.append("Ability.end")
if "火属性耐性" in effs_str:
effs.append("Endurance.fire")
if "地属性耐性" in effs_str:
effs.append("Endurance.earth")
if "風属性耐性" in effs_str:
effs.append("Endurance.wind")
if "水属性耐性" in effs_str:
effs.append("Endurance.ice")
if "雷属性耐性" in effs_str:
effs.append("Endurance.thunder")
if "光属性耐性" in effs_str:
effs.append("Endurance.light")
if "闇属性耐性" in effs_str:
effs.append("Endurance.dark")
if "物理耐性" in effs_str:
effs.append("Endurance.phy")
if "魔法耐性" in effs_str:
effs.append("Endurance.mag")
if "全体攻撃ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃ダメージ" in effs_str:
effs.append("Endurance.foe")
if "火属性攻撃" in effs_str:
effs.append("Damage.fire")
if "地属性攻撃" in effs_str:
effs.append("Damage.earth")
if "風属性攻撃" in effs_str:
effs.append("Damage.wind")
if "水属性攻撃" in effs_str:
effs.append("Damage.ice")
if "雷属性攻撃" in effs_str:
effs.append("Damage.thunder")
if "光属性攻撃" in effs_str:
effs.append("Damage.light")
if "闇属性攻撃" in effs_str:
effs.append("Damage.dark")
if "全体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foes")
if "単体攻撃被ダメージ" in effs_str:
effs.append("Endurance.foe")
if "HP" in effs_str or "HP" in effs_str:
effs.append("Recover.hp_turn")
if "MP" in effs_str or "MP" in effs_str:
effs.append("Recover.mp_turn")
return effs
def gen_eff_str(effs, scope, val_for_eff=None, turn=None):
    """Join the given effect enums into a comma-separated string of Effect(...) constructor calls."""
eff_enums = []
for e in effs:
if turn and val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff}, {turn})")
elif val_for_eff:
eff_enums.append(f"Effect({scope}, {e}, {val_for_eff})")
else:
eff_enums.append(f"Effect({scope}, {e}, 0)")
ret = ", ".join(eff_enums)
return ret
def parse_turns(text):
m = re.match(r".+(\d)ターンの間.+", text, re.UNICODE)
if m is None:
return None
turn = m.group(1)
return turn
def parse_scope(scope_str):
if "敵全体" in scope_str:
scope = "Scope.foes"
elif "敵単体" in scope_str:
scope = "Scope.foe"
elif "味方全体" in scope_str:
scope = "Scope.my_team"
elif "自分" in scope_str:
scope = "Scope.my_self"
else:
raise ValueError
return scope
def parse_atk(text):
scope = parse_scope(text)
if "超強威力" in text:
power = "Power.ultra"
elif "超威力" in text:
power = "Power.super"
elif "強威力" in text:
power = "Power.high"
elif "中威力" in text:
power = "Power.mid"
elif "弱威力" in text:
power = "Power.low"
else:
raise ValueError
m = re.match(r".+(\w)属性(\w\w)攻撃.+", text, re.UNICODE)
attr = m.group(1)
phy_mag = m.group(2)
if attr == "火":
attr_dmg = "Damage.fire"
elif attr == "地":
attr_dmg = "Damage.earth"
elif attr == "風":
attr_dmg = "Damage.wind"
elif attr == "水":
attr_dmg = "Damage.ice"
elif attr == "雷":
attr_dmg = "Damage.thunder"
elif attr == "光":
attr_dmg = "Damage.light"
elif attr == "闇":
attr_dmg = "Damage.dark"
else:
raise ValueError
if phy_mag == "物理":
atk = "Attack.phy"
elif phy_mag == "魔法":
atk = "Attack.mag"
else:
raise ValueError
temp_boost = ""
if "技発動時のみ力を上昇" in text or "技発動時のみ魔力を上昇" in text:
temp_boost = "temp_boost=True, "
boost_by_buff = ""
m = re.match(r".*自分の(\w+)上昇効果1つにつき、この技の威力が(\d+)[%%]上昇.*", text, re.UNICODE)
if m is not None:
up_val = int(m.group(2))
up_val /= 100
up_indexes = m.group(1)
effs = parse_effs(up_indexes)
enum_str = gen_eff_str(effs, "Scope.my_self", up_val)
boost_by_buff = f'boost_by_buff=[{enum_str}],'
atk_str = f"{scope}, {power}, {attr_dmg}, {atk}, {temp_boost} {boost_by_buff}"
return atk_str
def parse_debuff(text, turn):
m = re.match(r".*(敵単体|敵全体)(.+?)を?(\d+)[%%]減少.*", text, re.UNICODE)
if m is None:
m = re.match(r".+(敵単体|敵全体)(.+被ダメージ).*を(\d+)[%%]増加.*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
down_val = int(m.group(3))
down_val /= 100
enum_str = gen_eff_str(effs, scope, down_val, turn)
return enum_str
def parse_recover_hp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(HP|HP)治癒付与.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.hp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体に\w+の(HP|HP)回復技.*", text, re.UNICODE)
if m:
return f"Effect(Scope.my_team, Recover.hp_imm, 0.8)"
m = re.match(r".*味方全体の(HP|HP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.hp_imm, {up_val})"
def parse_recover_mp(text, turn):
m = re.match(r".*(\d+)ターンの間、(味方全体|自分)に(\d+)[%%]の(MP|MP)回復.*", text, re.UNICODE)
if m:
turn = m.group(1)
scope = parse_scope(m.group(2))
up_val = int(m.group(3)) / 100
return f"Effect({scope}, Recover.mp_turn, {up_val}, {turn})"
m = re.match(r".*味方全体の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_team, Recover.mp_imm, {up_val})"
m = re.match(r".*自分の(MP|MP)を(\d+)[%%]回復.*", text, re.UNICODE)
if m:
up_val = int(m.group(2)) / 100
return f"Effect(Scope.my_self, Recover.mp_imm, {up_val})"
def parse_buff(text, turn):
m = re.match(r".*(味方全体|自分)(.+)を(\d+)[%%](上昇|軽減).*", text, re.UNICODE)
if m is None:
return None
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
up_val = int(m.group(3))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val, turn)
return enum_str
def parse_passive_buff(text):
ret_effs = []
scope = "Scope.my_self"
m = re.match(r"(.+)が(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
effs = [e for e in effs if "Ability" not in e]
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
m = re.match(r".*毎ターン(.+)が(\d+)[%%]回復.*", text, re.UNICODE)
if m:
effs_str = m.group(1)
effs = parse_effs(effs_str)
up_val = int(m.group(2))
up_val /= 100
enum_str = gen_eff_str(effs, scope, up_val)
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def parse_adj_buff(text):
ret_effs = []
m = re.match(r".*(敵単体|敵全体)の(.+)上昇効果を解除.*", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
effs_str = m.group(2)
effs = parse_effs(effs_str)
for e in effs:
enum_str = f"Effect({scope}, AdjBuff.clear_buff, 0, 0, {e})"
ret_effs.append(enum_str)
m = re.match(r".*(自分|味方全体)のステイタス上昇効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス減少効果.*(\d+)ターン延長", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.extend_debuff, {turn_val}, 0)"
ret_effs.append(enum_str)
m = re.match(r".*(敵単体|敵全体)のステイタス上昇効果.*(\d+)ターン減少", text, re.UNICODE)
if m:
scope = parse_scope(m.group(1))
turn_val = m.group(2)
enum_str = f"Effect({scope}, AdjBuff.shorten_buff, {turn_val}, 0)"
ret_effs.append(enum_str)
return ", ".join(ret_effs)
def gen_skill_str(text, is_special=False):
    """Build a Skill(...) declaration string from one skill's Japanese description text."""
text = text.replace("\n", "")
text = text.replace("・", "")
print(text)
atk_str = ""
mp_str = ""
special_str = ""
is_fast_str = ""
if "すばやく" in text:
is_fast_str = "is_fast=True, "
if "攻撃。" in text:
# has attack
atk_str = parse_atk(text)
turn = parse_turns(text)
texts = []
texts_tmp = text.split("し、")
scope_guessing = None
for txt in texts_tmp:
if "味方全体" in txt:
scope_guessing = "味方全体"
elif "自分" in txt:
scope_guessing = "自分"
if "自分" not in txt and "味方全体" not in txt:
if scope_guessing:
txt = scope_guessing + txt
texts.extend(txt.split("さらに"))
buffs_eff = []
debuffs_eff = []
adj_buffs_eff = []
for t in texts:
b = parse_buff(t, turn)
d = parse_debuff(t, turn)
a = parse_adj_buff(t)
rhp = parse_recover_hp(t, turn)
rmp = parse_recover_mp(t, turn)
if b:
buffs_eff.append(b)
if rhp:
buffs_eff.append(rhp)
if rmp:
buffs_eff.append(rmp)
if d:
debuffs_eff.append(d)
if a:
adj_buffs_eff.append(a)
buffs_str = f"buffs=[{', '.join(buffs_eff)}],"
debuffs_str = f"debuffs=[{', '.join(debuffs_eff)}],"
adj_buffs_str = f"adj_buffs=[{', '.join(adj_buffs_eff)}],"
# print(buffs_str)
# print(debuffs_str)
if is_special:
special_str = "is_special=True,"
else:
m = re.match(r".+(MP:(\d+)).*", text, re.UNICODE)
if m:
mp = m.group(1)
mp_str = f"mp={mp},"
skill_dec_str = f"Skill({is_fast_str} {atk_str} {special_str} {mp_str} {buffs_str} {debuffs_str} {adj_buffs_str})"
return skill_dec_str
def gen_passive_skill_str(text):
b = parse_passive_buff(text)
print(text)
return b
def gen_counter_hp_str(text):
m = re.match(r".*】.*カウンター発生時.*通常攻撃.*(HP|HP)回復", text, re.UNICODE)
if m:
return "counter_hp=True,"
return ""
def gen_killer_str(text):
m = re.match(r".*】(\w+)の敵を攻撃.*(\d+)[%%]上昇.*", text, re.UNICODE)
if m:
killer = m.group(1)
if killer == "猛牛系":
return "killer=Killer.bull, "
elif killer == "巨人系":
return "killer=Killer.giant, "
elif killer == "魔獣系":
return "killer=Killer.beast, "
elif killer == "精霊系":
return "killer=Killer.fairy, "
elif killer == "植物系":
return "killer=Killer.plant, "
elif killer == "昆虫系":
return "killer=Killer.bug, "
elif killer == "堅鉱系":
return "killer=Killer.rock, "
elif killer == "蠕獣系":
return "killer=Killer.worm, "
elif killer == "竜系":
return "killer=Killer.dragon, "
elif killer == "水棲系":
return "killer=Killer.aquatic, "
elif killer == "妖鬼系":
return "killer=Killer.orge, "
elif killer == "幽魔系":
return "killer=Killer.undead, "
else:
raise ValueError
return ""
def parsing_chara(html_text):
    """Parse a character page's HTML and print the generated declaration(s)."""
parser = HtmlParser(html_text)
chara = Chara()
basics_table = parser.get_next_div()
for tr in basics_table.table.find_all('tr'):
col = tr.th.text
val = tr.td.text
if col == "名称":
chara.name = val
if col == "カテゴリ":
if val == "冒険者":
chara.job = "Adventurer"
elif val == "アシスト":
chara.job = "Assist"
limit_break_status_table = parser.get_next_div()
while "最大値" not in limit_break_status_table.text:
limit_break_status_table = parser.get_next_div()
#limit_break_status_table = parser.get_next_div()
for tr in limit_break_status_table.table.find_all('tr'):
if tr.td is None:
continue
col = tr.td
val = col.find_next_sibling()
print(col.text, val.text)
if col.text == "HP":
chara.hp = int(val.text)
if col.text == "MP":
chara.mp = int(val.text)
if col.text == "物攻":
chara.str = int(val.text.split("(")[0])
if col.text == "魔攻":
chara.mag = int(val.text.split("(")[0])
if col.text == "防御":
chara.end = int(val.text.split("(")[0])
if col.text == "器用":
chara.dex = int(val.text.split("(")[0])
if col.text == "敏捷":
chara.agi = int(val.text.split("(")[0])
all_skills = []
all_passive_skills = []
if chara.job == "Adventurer":
status_table_no_used = parser.get_next_div()
special_skill = parser.get_next_div()
special_skill_dec_str = gen_skill_str(special_skill.text, True)
skills = parser.get_next_div()
for s in skills.find_all("td"):
skill_str = gen_skill_str(s.text)
all_skills.append(skill_str)
if chara.job == "Adventurer":
all_skills.append(special_skill_dec_str)
concated_skills = ',\n '.join(all_skills)
chara.skills = f"skills=[{concated_skills}],"
if chara.job == "Adventurer":
passive_skills = parser.get_next_div()
for s in passive_skills.find_all("td"):
passive_skill_str = gen_passive_skill_str(s.text)
if passive_skill_str:
all_passive_skills.append(passive_skill_str)
if chara.killer == "":
chara.killer = gen_killer_str(s.text)
if chara.counter_hp == "":
chara.counter_hp = gen_counter_hp_str(s.text)
concated_passive_skills = ',\n '.join(all_passive_skills)
chara.passive_skills = f"passive_skills=[Skill(buffs=[{concated_passive_skills}])],"
template = jinja2.Template("""
{{chara.job}}("{{chara.name}}", {{chara.hp}}, {{chara.mp}},
{{chara.str}}, {{chara.end}}, {{chara.dex}}, {{chara.agi}}, {{chara.mag}},
{{chara.skills}}
{{chara.passive_skills}}
{{chara.killer}}
{{chara.counter_hp}}
),
""")
if chara.job == "Adventurer":
out = template.render(chara=chara)
print(out)
else:
for i, s in enumerate(all_skills):
print("======================================================")
if i == 0:
continue
elif i == 1:
print("LV 60~76:")
elif i == 2:
print("LV 80:")
else:
                raise ValueError("unexpected number of assist skill entries")
chara.skills = f"skill={s}"
out = template.render(chara=chara)
print(out)
def parsing_chara_from_web(http_url):
r = requests.get(http_url)
html_text = r.text
parsing_chara(html_text)
if __name__ == '__main__':
with open('tmp.html', 'r', encoding="utf-8") as f:
html_text_to_test = f.read()
parsing_chara(html_text_to_test)
| 28.237148 | 118 | 0.558231 | [
"Apache-2.0"
] | gkmike/DamMemoSim | gen_chara.py | 18,267 | Python |
"""Base classes for Axis entities."""
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN as AXIS_DOMAIN
class AxisEntityBase(Entity):
"""Base common to all Axis entities."""
def __init__(self, device):
"""Initialize the Axis event."""
self.device = device
self._attr_device_info = DeviceInfo(
identifiers={(AXIS_DOMAIN, device.unique_id)}
)
async def async_added_to_hass(self):
"""Subscribe device events."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, self.device.signal_reachable, self.update_callback
)
)
@property
def available(self):
"""Return True if device is available."""
return self.device.available
@callback
def update_callback(self, no_delay=None):
"""Update the entities state."""
self.async_write_ha_state()
class AxisEventBase(AxisEntityBase):
"""Base common to all Axis entities from event stream."""
_attr_should_poll = False
def __init__(self, event, device):
"""Initialize the Axis event."""
super().__init__(device)
self.event = event
self._attr_name = f"{device.name} {event.TYPE} {event.id}"
self._attr_unique_id = f"{device.unique_id}-{event.topic}-{event.id}"
self._attr_device_class = event.CLASS
async def async_added_to_hass(self) -> None:
"""Subscribe sensors events."""
self.event.register_callback(self.update_callback)
await super().async_added_to_hass()
async def async_will_remove_from_hass(self) -> None:
"""Disconnect device object when removed."""
self.event.remove_callback(self.update_callback)
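# ---------------------------------------------------------------------------
# Editor's sketch (not part of the integration): a minimal concrete entity
# built on the bases above.  It only uses attributes already referenced in
# this file (device.name, device.unique_id); real platforms layer their own
# state handling on top.
class AxisReachabilityEntity(AxisEntityBase):
    """Hypothetical entity that exposes nothing beyond base availability."""
    _attr_should_poll = False
    def __init__(self, device):
        """Initialize the hypothetical entity."""
        super().__init__(device)
        self._attr_name = f"{device.name} reachability"
        self._attr_unique_id = f"{device.unique_id}-reachability"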
| 29.952381 | 77 | 0.665607 | [
"Apache-2.0"
] | 2004happy/core | homeassistant/components/axis/axis_base.py | 1,887 | Python |
'''
This module has all relevant functions to make predictions using a previously
trained model.
'''
import os
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
import cv2
model = load_model('../models/app_ban_ora_selftrained')
def decode_prediction(prediction):
'''
Decodes predictions and returns a result string.
'''
if np.where(prediction == np.amax(prediction))[1] == 2:
prob_orange = round(prediction[0][2] * 100, 2)
label = f"I am {prob_orange} % sure this is an orange \N{tangerine}!"
if np.where(prediction == np.amax(prediction))[1] == 1:
prob_banana = round(prediction[0][1] * 100, 2)
label = f"I am {prob_banana} % sure this is a banana \N{banana}!"
if np.where(prediction == np.amax(prediction))[1] == 0:
prob_apple = round(prediction[0][0] * 100, 2)
label = f"I am {prob_apple} % sure this is an apple \N{red apple}!"
return label
def predict(frame):
'''
    Takes a frame as input, makes a prediction, decodes it
and returns a result string.
'''
img = cv2.resize(frame, (224, 224))
img = cv2.cvtColor(np.float32(img), cv2.COLOR_BGR2RGB)
img = img.reshape(1, 224, 224, 3)
prediction = model.predict(img)
label = decode_prediction(prediction)
return label
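# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): one way predict() could
# be fed a frame.  The webcam index and the presence of the model path loaded
# above are assumptions.
if __name__ == '__main__':
    capture = cv2.VideoCapture(0)
    ok, frame = capture.read()
    if ok:
        print(predict(frame))
    capture.release()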
| 34.6 | 77 | 0.669798 | [
"MIT"
] | DariusTorabian/image-classifier | src/predict.py | 1,384 | Python |
import asyncio
import math
import time
import traceback
from pathlib import Path
from random import Random
from secrets import randbits
from typing import Dict, Optional, List, Set
import aiosqlite
import chia.server.ws_connection as ws
import dns.asyncresolver
from chia.protocols import full_node_protocol, introducer_protocol
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.address_manager import AddressManager, ExtendedPeerInfo
from chia.server.address_manager_store import AddressManagerStore
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ChiaServer
from chia.types.peer_info import PeerInfo, TimestampedPeerInfo
from chia.util.hash import std_hash
from chia.util.ints import uint64
from chia.util.path import mkdir, path_from_root
MAX_PEERS_RECEIVED_PER_REQUEST = 1000
MAX_TOTAL_PEERS_RECEIVED = 3000
MAX_CONCURRENT_OUTBOUND_CONNECTIONS = 70
NETWORK_ID_DEFAULT_PORTS = {
"mainnet": 8444,
"testnet7": 58444,
"testnet8": 58445,
}
class FullNodeDiscovery:
resolver: Optional[dns.asyncresolver.Resolver]
def __init__(
self,
server: ChiaServer,
root_path: Path,
target_outbound_count: int,
peer_db_path: str,
introducer_info: Optional[Dict],
dns_servers: List[str],
peer_connect_interval: int,
selected_network: str,
default_port: Optional[int],
log,
):
self.server: ChiaServer = server
self.message_queue: asyncio.Queue = asyncio.Queue()
self.is_closed = False
self.target_outbound_count = target_outbound_count
# This is a double check to make sure testnet and mainnet peer databases never mix up.
# If the network is not 'mainnet', it names the peer db differently, including the selected_network.
if selected_network != "mainnet":
if not peer_db_path.endswith(".sqlite"):
raise ValueError(f"Invalid path for peer table db: {peer_db_path}. Make the path end with .sqlite")
peer_db_path = peer_db_path[:-7] + "_" + selected_network + ".sqlite"
self.peer_db_path = path_from_root(root_path, peer_db_path)
self.dns_servers = dns_servers
if introducer_info is not None:
self.introducer_info: Optional[PeerInfo] = PeerInfo(
introducer_info["host"],
introducer_info["port"],
)
else:
self.introducer_info = None
self.peer_connect_interval = peer_connect_interval
self.log = log
self.relay_queue = None
self.address_manager: Optional[AddressManager] = None
self.connection_time_pretest: Dict = {}
self.received_count_from_peers: Dict = {}
self.lock = asyncio.Lock()
self.connect_peers_task: Optional[asyncio.Task] = None
self.serialize_task: Optional[asyncio.Task] = None
self.cleanup_task: Optional[asyncio.Task] = None
self.initial_wait: int = 0
try:
self.resolver: Optional[dns.asyncresolver.Resolver] = dns.asyncresolver.Resolver()
except Exception:
self.resolver = None
self.log.exception("Error initializing asyncresolver")
self.pending_outbound_connections: Set[str] = set()
self.pending_tasks: Set[asyncio.Task] = set()
self.default_port: Optional[int] = default_port
if default_port is None and selected_network in NETWORK_ID_DEFAULT_PORTS:
self.default_port = NETWORK_ID_DEFAULT_PORTS[selected_network]
async def initialize_address_manager(self) -> None:
mkdir(self.peer_db_path.parent)
self.connection = await aiosqlite.connect(self.peer_db_path)
await self.connection.execute("pragma journal_mode=wal")
await self.connection.execute("pragma synchronous=OFF")
self.address_manager_store = await AddressManagerStore.create(self.connection)
if not await self.address_manager_store.is_empty():
self.address_manager = await self.address_manager_store.deserialize()
else:
await self.address_manager_store.clear()
self.address_manager = AddressManager()
self.server.set_received_message_callback(self.update_peer_timestamp_on_message)
async def start_tasks(self) -> None:
random = Random()
self.connect_peers_task = asyncio.create_task(self._connect_to_peers(random))
self.serialize_task = asyncio.create_task(self._periodically_serialize(random))
self.cleanup_task = asyncio.create_task(self._periodically_cleanup())
async def _close_common(self) -> None:
self.is_closed = True
self.cancel_task_safe(self.connect_peers_task)
self.cancel_task_safe(self.serialize_task)
self.cancel_task_safe(self.cleanup_task)
for t in self.pending_tasks:
self.cancel_task_safe(t)
if len(self.pending_tasks) > 0:
await asyncio.wait(self.pending_tasks)
await self.connection.close()
def cancel_task_safe(self, task: Optional[asyncio.Task]):
if task is not None:
try:
task.cancel()
except Exception as e:
self.log.error(f"Error while canceling task.{e} {task}")
def add_message(self, message, data):
self.message_queue.put_nowait((message, data))
async def on_connect(self, peer: ws.WSChiaConnection):
if (
peer.is_outbound is False
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and self.server._local_type is NodeType.FULL_NODE
and self.address_manager is not None
):
timestamped_peer_info = TimestampedPeerInfo(
peer.peer_host,
peer.peer_server_port,
uint64(int(time.time())),
)
await self.address_manager.add_to_new_table([timestamped_peer_info], peer.get_peer_info(), 0)
if self.relay_queue is not None:
self.relay_queue.put_nowait((timestamped_peer_info, 1))
if (
peer.is_outbound
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and (self.server._local_type is NodeType.FULL_NODE or self.server._local_type is NodeType.WALLET)
and self.address_manager is not None
):
msg = make_msg(ProtocolMessageTypes.request_peers, full_node_protocol.RequestPeers())
await peer.send_message(msg)
# Updates timestamps each time we receive a message for outbound connections.
async def update_peer_timestamp_on_message(self, peer: ws.WSChiaConnection):
if (
peer.is_outbound
and peer.peer_server_port is not None
and peer.connection_type is NodeType.FULL_NODE
and self.server._local_type is NodeType.FULL_NODE
and self.address_manager is not None
):
peer_info = peer.get_peer_info()
if peer_info is None:
return None
if peer_info.host not in self.connection_time_pretest:
self.connection_time_pretest[peer_info.host] = time.time()
if time.time() - self.connection_time_pretest[peer_info.host] > 600:
self.connection_time_pretest[peer_info.host] = time.time()
await self.address_manager.connect(peer_info)
def _num_needed_peers(self) -> int:
target = self.target_outbound_count
outgoing = len(self.server.get_full_node_outgoing_connections())
return max(0, target - outgoing)
"""
Uses the Poisson distribution to determine the next time
when we'll initiate a feeler connection.
(https://en.wikipedia.org/wiki/Poisson_distribution)
"""
def _poisson_next_send(self, now, avg_interval_seconds, random):
return now + (
math.log(random.randrange(1 << 48) * -0.0000000000000035527136788 + 1) * avg_interval_seconds * -1000000.0
+ 0.5
)
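# Worked example (informal): randrange(1 << 48) scaled by ~1/2**48 is roughly
# uniform on [0, 1), so log(1 - u) * -avg_interval_seconds samples an
# exponentially distributed delay with mean avg_interval_seconds; the factor of
# 1e6 converts it to the microsecond scale of `now`. With
# avg_interval_seconds=240, a feeler is therefore attempted about once every
# four minutes on average.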
async def _introducer_client(self):
if self.introducer_info is None:
return None
async def on_connect(peer: ws.WSChiaConnection):
msg = make_msg(ProtocolMessageTypes.request_peers_introducer, introducer_protocol.RequestPeersIntroducer())
await peer.send_message(msg)
await self.server.start_client(self.introducer_info, on_connect)
async def _query_dns(self, dns_address):
try:
if self.default_port is None:
self.log.error(
"Network id not supported in NETWORK_ID_DEFAULT_PORTS neither in config. Skipping DNS query."
)
return
if self.resolver is None:
self.log.warn("Skipping DNS query: asyncresolver not initialized.")
return
for rdtype in ["A", "AAAA"]:
peers: List[TimestampedPeerInfo] = []
result = await self.resolver.resolve(qname=dns_address, rdtype=rdtype, lifetime=30)
for ip in result:
peers.append(
TimestampedPeerInfo(
ip.to_text(),
self.default_port,
0,
)
)
self.log.info(f"Received {len(peers)} peers from DNS seeder, using rdtype = {rdtype}.")
if len(peers) > 0:
await self._respond_peers_common(full_node_protocol.RespondPeers(peers), None, False)
except Exception as e:
self.log.warn(f"querying DNS introducer failed: {e}")
async def start_client_async(self, addr: PeerInfo, is_feeler: bool) -> None:
try:
if self.address_manager is None:
return
self.pending_outbound_connections.add(addr.host)
client_connected = await self.server.start_client(
addr,
on_connect=self.server.on_connect,
is_feeler=is_feeler,
)
if self.server.is_duplicate_or_self_connection(addr):
# Mark it as a softer attempt, without counting the failures.
await self.address_manager.attempt(addr, False)
else:
if client_connected is True:
await self.address_manager.mark_good(addr)
await self.address_manager.connect(addr)
else:
await self.address_manager.attempt(addr, True)
self.pending_outbound_connections.remove(addr.host)
except Exception as e:
if addr.host in self.pending_outbound_connections:
self.pending_outbound_connections.remove(addr.host)
self.log.error(f"Exception in create outbound connections: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def _connect_to_peers(self, random) -> None:
next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
retry_introducers = False
introducer_attempts: int = 0
dns_server_index: int = 0
local_peerinfo: Optional[PeerInfo] = await self.server.get_peer_info()
last_timestamp_local_info: uint64 = uint64(int(time.time()))
last_collision_timestamp = 0
if self.initial_wait > 0:
await asyncio.sleep(self.initial_wait)
introducer_backoff = 1
while not self.is_closed:
try:
assert self.address_manager is not None
# We don't know any address, connect to the introducer to get some.
size = await self.address_manager.size()
if size == 0 or retry_introducers:
try:
await asyncio.sleep(introducer_backoff)
except asyncio.CancelledError:
return None
# Run dual between DNS servers and introducers. One time query DNS server,
# next two times query the introducer.
if introducer_attempts % 3 == 0 and len(self.dns_servers) > 0:
dns_address = self.dns_servers[dns_server_index]
dns_server_index = (dns_server_index + 1) % len(self.dns_servers)
await self._query_dns(dns_address)
else:
await self._introducer_client()
# there's some delay between receiving the peers from the
# introducer until they get incorporated to prevent this
# loop for running one more time. Add this delay to ensure
# that once we get peers, we stop contacting the introducer.
try:
await asyncio.sleep(5)
except asyncio.CancelledError:
return None
retry_introducers = False
introducer_attempts += 1
# keep doubling the introducer delay until we reach 5
# minutes
if introducer_backoff < 300:
introducer_backoff *= 2
continue
else:
introducer_backoff = 1
# Only connect out to one peer per network group (/16 for IPv4).
groups = set()
full_node_connected = self.server.get_full_node_outgoing_connections()
connected = [c.get_peer_info() for c in full_node_connected]
connected = [c for c in connected if c is not None]
for conn in full_node_connected:
peer = conn.get_peer_info()
if peer is None:
continue
group = peer.get_group()
groups.add(group)
# Feeler Connections
#
# Design goals:
# * Increase the number of connectable addresses in the tried table.
#
# Method:
# * Choose a random address from new and attempt to connect to it; if we can connect
# successfully, it is added to tried.
# * Start attempting feeler connections only after node finishes making outbound
# connections.
# * Only make a feeler connection once every few minutes.
is_feeler = False
has_collision = False
if self._num_needed_peers() == 0:
if time.time() * 1000 * 1000 > next_feeler:
next_feeler = self._poisson_next_send(time.time() * 1000 * 1000, 240, random)
is_feeler = True
await self.address_manager.resolve_tried_collisions()
tries = 0
now = time.time()
got_peer = False
addr: Optional[PeerInfo] = None
max_tries = 50
if len(groups) < 3:
max_tries = 10
elif len(groups) <= 5:
max_tries = 25
select_peer_interval = max(0.1, len(groups) * 0.25)
while not got_peer and not self.is_closed:
self.log.debug(f"Address manager query count: {tries}. Query limit: {max_tries}")
try:
await asyncio.sleep(select_peer_interval)
except asyncio.CancelledError:
return None
tries += 1
if tries > max_tries:
addr = None
retry_introducers = True
break
info: Optional[ExtendedPeerInfo] = await self.address_manager.select_tried_collision()
if info is None or time.time() - last_collision_timestamp <= 60:
info = await self.address_manager.select_peer(is_feeler)
else:
has_collision = True
last_collision_timestamp = int(time.time())
if info is None:
if not is_feeler:
retry_introducers = True
break
# Require outbound connections, other than feelers,
# to be to distinct network groups.
addr = info.peer_info
if has_collision:
break
if addr is not None and not addr.is_valid():
addr = None
continue
if not is_feeler and addr.get_group() in groups:
addr = None
continue
if addr in connected:
addr = None
continue
# attempt a node once per 30 minutes.
if now - info.last_try < 1800:
continue
if time.time() - last_timestamp_local_info > 1800 or local_peerinfo is None:
local_peerinfo = await self.server.get_peer_info()
last_timestamp_local_info = uint64(int(time.time()))
if local_peerinfo is not None and addr == local_peerinfo:
continue
got_peer = True
self.log.debug(f"Addrman selected address: {addr}.")
disconnect_after_handshake = is_feeler
extra_peers_needed = self._num_needed_peers()
if extra_peers_needed == 0:
disconnect_after_handshake = True
retry_introducers = False
self.log.debug(f"Num peers needed: {extra_peers_needed}")
initiate_connection = extra_peers_needed > 0 or has_collision or is_feeler
connect_peer_interval = max(0.25, len(groups) * 0.5)
if not initiate_connection:
connect_peer_interval += 15
connect_peer_interval = min(connect_peer_interval, self.peer_connect_interval)
if addr is not None and initiate_connection and addr.host not in self.pending_outbound_connections:
if len(self.pending_outbound_connections) >= MAX_CONCURRENT_OUTBOUND_CONNECTIONS:
self.log.debug("Max concurrent outbound connections reached. waiting")
await asyncio.wait(self.pending_tasks, return_when=asyncio.FIRST_COMPLETED)
self.pending_tasks.add(
asyncio.create_task(self.start_client_async(addr, disconnect_after_handshake))
)
await asyncio.sleep(connect_peer_interval)
# prune completed connect tasks
self.pending_tasks = set(filter(lambda t: not t.done(), self.pending_tasks))
except Exception as e:
self.log.error(f"Exception in create outbound connections: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def _periodically_serialize(self, random: Random):
while not self.is_closed:
if self.address_manager is None:
await asyncio.sleep(10)
continue
serialize_interval = random.randint(15 * 60, 30 * 60)
await asyncio.sleep(serialize_interval)
async with self.address_manager.lock:
await self.address_manager_store.serialize(self.address_manager)
async def _periodically_cleanup(self) -> None:
while not self.is_closed:
# Removes entries with timestamp worse than 14 days ago
# and with a high number of failed attempts.
# Most likely, the peer left the network,
# so we can save space in the peer tables.
cleanup_interval = 1800
max_timestamp_difference = 14 * 3600 * 24
max_consecutive_failures = 10
await asyncio.sleep(cleanup_interval)
# Perform the cleanup only if we have at least 3 connections.
full_node_connected = self.server.get_full_node_connections()
connected = [c.get_peer_info() for c in full_node_connected]
connected = [c for c in connected if c is not None]
if self.address_manager is not None and len(connected) >= 3:
async with self.address_manager.lock:
self.address_manager.cleanup(max_timestamp_difference, max_consecutive_failures)
async def _respond_peers_common(self, request, peer_src, is_full_node) -> None:
# Check if we got the peers from a full node or from the introducer.
peers_adjusted_timestamp = []
is_misbehaving = False
if len(request.peer_list) > MAX_PEERS_RECEIVED_PER_REQUEST:
is_misbehaving = True
if is_full_node:
if peer_src is None:
return None
async with self.lock:
if peer_src.host not in self.received_count_from_peers:
self.received_count_from_peers[peer_src.host] = 0
self.received_count_from_peers[peer_src.host] += len(request.peer_list)
if self.received_count_from_peers[peer_src.host] > MAX_TOTAL_PEERS_RECEIVED:
is_misbehaving = True
if is_misbehaving:
return None
for peer in request.peer_list:
if peer.timestamp < 100000000 or peer.timestamp > time.time() + 10 * 60:
# Invalid timestamp, predefine a bad one.
current_peer = TimestampedPeerInfo(
peer.host,
peer.port,
uint64(int(time.time() - 5 * 24 * 60 * 60)),
)
else:
current_peer = peer
if not is_full_node:
current_peer = TimestampedPeerInfo(
peer.host,
peer.port,
uint64(0),
)
peers_adjusted_timestamp.append(current_peer)
assert self.address_manager is not None
if is_full_node:
await self.address_manager.add_to_new_table(peers_adjusted_timestamp, peer_src, 2 * 60 * 60)
else:
await self.address_manager.add_to_new_table(peers_adjusted_timestamp, None, 0)
class FullNodePeers(FullNodeDiscovery):
self_advertise_task: Optional[asyncio.Task] = None
address_relay_task: Optional[asyncio.Task] = None
def __init__(
self,
server,
root_path,
max_inbound_count,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
):
super().__init__(
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
)
self.relay_queue = asyncio.Queue()
self.neighbour_known_peers = {}
self.key = randbits(256)
async def start(self):
await self.initialize_address_manager()
self.self_advertise_task = asyncio.create_task(self._periodically_self_advertise_and_clean_data())
self.address_relay_task = asyncio.create_task(self._address_relay())
await self.start_tasks()
async def close(self):
await self._close_common()
self.cancel_task_safe(self.self_advertise_task)
self.cancel_task_safe(self.address_relay_task)
async def _periodically_self_advertise_and_clean_data(self):
while not self.is_closed:
try:
try:
await asyncio.sleep(24 * 3600)
except asyncio.CancelledError:
return None
# Clean up known nodes for neighbours every 24 hours.
async with self.lock:
for neighbour in list(self.neighbour_known_peers.keys()):
self.neighbour_known_peers[neighbour].clear()
# Self advertise every 24 hours.
peer = await self.server.get_peer_info()
if peer is None:
continue
timestamped_peer = [
TimestampedPeerInfo(
peer.host,
peer.port,
uint64(int(time.time())),
)
]
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers(timestamped_peer),
)
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async with self.lock:
for host in list(self.received_count_from_peers.keys()):
self.received_count_from_peers[host] = 0
except Exception as e:
self.log.error(f"Exception in self advertise: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
async def add_peers_neighbour(self, peers, neighbour_info):
neighbour_data = (neighbour_info.host, neighbour_info.port)
async with self.lock:
for peer in peers:
if neighbour_data not in self.neighbour_known_peers:
self.neighbour_known_peers[neighbour_data] = set()
if peer.host not in self.neighbour_known_peers[neighbour_data]:
self.neighbour_known_peers[neighbour_data].add(peer.host)
async def request_peers(self, peer_info: PeerInfo):
try:
# Prevent a fingerprint attack: do not send peers to inbound connections.
# This asymmetric behavior for inbound and outbound connections was introduced
# to prevent a fingerprinting attack: an attacker can send specific fake addresses
# to users' AddrMan and later request them by sending getaddr messages.
# Making nodes which are behind NAT and can only make outgoing connections ignore
# the request_peers message mitigates the attack.
if self.address_manager is None:
return None
peers = await self.address_manager.get_peers()
await self.add_peers_neighbour(peers, peer_info)
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers(peers),
)
return msg
except Exception as e:
self.log.error(f"Request peers exception: {e}")
async def respond_peers(self, request, peer_src, is_full_node):
try:
await self._respond_peers_common(request, peer_src, is_full_node)
if is_full_node:
await self.add_peers_neighbour(request.peer_list, peer_src)
if len(request.peer_list) == 1 and self.relay_queue is not None:
peer = request.peer_list[0]
if peer.timestamp > time.time() - 60 * 10:
self.relay_queue.put_nowait((peer, 2))
except Exception as e:
self.log.error(f"Respond peers exception: {e}. Traceback: {traceback.format_exc()}")
async def _address_relay(self):
while not self.is_closed:
try:
try:
relay_peer, num_peers = await self.relay_queue.get()
except asyncio.CancelledError:
return None
relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
if not relay_peer_info.is_valid():
continue
# https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
connections = self.server.get_full_node_connections()
hashes = []
cur_day = int(time.time()) // (24 * 60 * 60)
for connection in connections:
peer_info = connection.get_peer_info()
if peer_info is None:
continue
cur_hash = int.from_bytes(
bytes(
std_hash(
self.key.to_bytes(32, byteorder="big")
+ peer_info.get_key()
+ cur_day.to_bytes(3, byteorder="big")
)
),
byteorder="big",
)
hashes.append((cur_hash, connection))
hashes.sort(key=lambda x: x[0])
for index, (_, connection) in enumerate(hashes):
if index >= num_peers:
break
peer_info = connection.get_peer_info()
pair = (peer_info.host, peer_info.port)
async with self.lock:
if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
continue
if pair not in self.neighbour_known_peers:
self.neighbour_known_peers[pair] = set()
self.neighbour_known_peers[pair].add(relay_peer.host)
if connection.peer_node_id is None:
continue
msg = make_msg(
ProtocolMessageTypes.respond_peers,
full_node_protocol.RespondPeers([relay_peer]),
)
await connection.send_message(msg)
except Exception as e:
self.log.error(f"Exception in address relay: {e}")
self.log.error(f"Traceback: {traceback.format_exc()}")
class WalletPeers(FullNodeDiscovery):
def __init__(
self,
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
) -> None:
super().__init__(
server,
root_path,
target_outbound_count,
peer_db_path,
introducer_info,
dns_servers,
peer_connect_interval,
selected_network,
default_port,
log,
)
async def start(self) -> None:
self.initial_wait = 60
await self.initialize_address_manager()
await self.start_tasks()
async def ensure_is_closed(self) -> None:
if self.is_closed:
return None
await self._close_common()
async def respond_peers(self, request, peer_src, is_full_node) -> None:
await self._respond_peers_common(request, peer_src, is_full_node)
| 44.271831 | 119 | 0.581459 | ["Apache-2.0"] | 13thProgression/gold-blockchain | chia/server/node_discovery.py | 31,433 | Python
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
result type will give the highest type of its argument, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
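# Usage sketch (illustrative only; the argument names are hypothetical, and it
# assumes `check_units` and `second` have been imported from brian2):
#
#     @declare_types(n='integer', scale='float', result='float')
#     @check_units(n=1, scale=second, result=second)
#     def scaled(n, scale):
#         return n * scale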
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explictly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case are random number functions, e.g. equations refer to
``rand()``, but the generate code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
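# Illustrative sketch: wrapping a plain numpy function by hand (the usual entry
# point is the `implementation` decorator below). The units follow the "square"
# example mentioned in the docstring above.
#
#     square = Function(np.square, arg_units=[None],
#                       return_unit=lambda u: u ** 2)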
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows to generate code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
raise TypeError('namespace argument has to be a callable, is type %s instead' % type(code))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
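# Usage sketch (hypothetical code generator): the callable receives the owner of
# the CodeObject, so a snippet can be specialised at run time, e.g. to the
# owner's dt (assuming the owner exposes ``clock.dt_``):
#
#     def _dt_dependent_code(owner):
#         return 'const double _func_dt = %.17g;' % owner.clock.dt_
#     func.implementations.add_dynamic_implementation('cpp', code=_dt_dependent_code)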
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally import
values (e.g. does ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
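# Numerical motivation (informal example): in double precision,
# (math.exp(1e-20) - 1) / 1e-20 evaluates to 0.0 because exp(1e-20) rounds to
# exactly 1.0, whereas the true value of exprel(1e-20) is approximately 1.0;
# numeric implementations therefore typically compute expm1(x) / x instead.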
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
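# Worked example (plain floats, no units): 0.3 / 0.1 is 2.999999... in double
# precision, so a naive int(t / dt) gives 2; timestep(0.3, 0.1) shifts t by
# 1e-3 * dt first and returns 3, keeping multiples of dt in the correct step.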
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
| 47.140975 | 109 | 0.572499 | ["BSD-2-Clause"] | Ziaeemehr/brian2 | brian2/core/functions.py | 35,780 | Python
# -*- coding: utf-8 -*-
"""SQLAlchemy models for Bio2BEL HGNC."""
from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Integer, String, Table
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm import relationship
from bio2bel.compath import CompathPathwayMixin, CompathProteinMixin
from .constants import MODULE_NAME
__all__ = [
'Base',
'GeneFamily',
'HumanGene',
'MouseGene',
'RatGene',
'human_mouse',
'human_rat',
]
HUMAN_GENE_TABLE_NAME = f'{MODULE_NAME}_humanGene'
HUMAN_RAT_TABLE_NAME = f'{MODULE_NAME}_humanGene_ratGene'
RAT_GENE_TABLE_NAME = f'{MODULE_NAME}_ratGene'
HUMAN_MOUSE_TABLE_NAME = f'{MODULE_NAME}_humanGene_mouseGene'
MOUSE_GENE_TABLE_NAME = f'{MODULE_NAME}_mouseGene'
GENE_FAMILY_TABLE_NAME = f'{MODULE_NAME}_geneFamily'
GENE_TO_FAMILY_TABLE_NAME = f'{MODULE_NAME}_humanGene_geneFamily'
Base: DeclarativeMeta = declarative_base()
human_mouse = Table(
HUMAN_MOUSE_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('mouse_gene_id', Integer, ForeignKey(f'{MOUSE_GENE_TABLE_NAME}.id'), primary_key=True),
)
human_rat = Table(
HUMAN_RAT_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('rat_gene_id', Integer, ForeignKey(f'{RAT_GENE_TABLE_NAME}.id'), primary_key=True),
)
human_family = Table(
GENE_TO_FAMILY_TABLE_NAME,
Base.metadata,
Column('human_gene_id', Integer, ForeignKey(f'{HUMAN_GENE_TABLE_NAME}.id'), primary_key=True),
Column('gene_family_id', Integer, ForeignKey(f'{GENE_FAMILY_TABLE_NAME}.id'), primary_key=True),
)
class HumanGene(Base, CompathProteinMixin):
"""A SQLAlchemy model for a human gene."""
__tablename__ = HUMAN_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
hgnc_id = Column(String(255), doc='HGNC id of the protein')
hgnc_symbol = Column(String(255), doc='HGN symbol of the protein')
class MouseGene(Base):
"""A SQLAlchemy model for a mouse gene."""
__tablename__ = MOUSE_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
mgi_id = Column(String(255), doc='MGI id of the protein')
mgi_symbol = Column(String(255), doc='MGI symbol of the protein')
human_genes = relationship(
HumanGene,
secondary=human_mouse,
backref='mouse_genes',
)
class RatGene(Base):
"""A SQLAlchemy model for an rat gene."""
__tablename__ = RAT_GENE_TABLE_NAME
id = Column(Integer, primary_key=True)
entrez_id = Column(String(255), doc='entrez id of the protein')
rgd_id = Column(String(255), doc='RGD id of the protein')
rgd_symbol = Column(String(255), doc='RGD symbol of the protein')
human_genes = relationship(
HumanGene,
secondary=human_rat,
backref='rat_genes',
)
class GeneFamily(CompathPathwayMixin, Base):
"""A SQLAlchemy model for an HGNC Gene family."""
__tablename__ = GENE_FAMILY_TABLE_NAME
id = Column(Integer, primary_key=True)
identifier = Column(String(255), doc='HGNC gene family id of the protein')
symbol = Column(String(255), doc='HGNC gene family symbol of the protein')
name = Column(String(255), doc='HGNC gene family name of the protein')
proteins = relationship(
HumanGene,
secondary=human_family,
backref='gene_families',
)
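# Minimal usage sketch (assumes an in-memory SQLite engine; the family symbol
# queried below is hypothetical):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite://')
#     Base.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#     family = session.query(GeneFamily).filter_by(symbol='HDAC').one_or_none()
#     if family is not None:
#         print([gene.hgnc_symbol for gene in family.proteins])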
| 30.283333 | 100 | 0.720143 | ["MIT"] | bio2bel/hgnc | src/bio2bel_hgnc/models.py | 3,634 | Python
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="Zr7lk4kl3mUPXfmrsWWDcfVMuZW6PPy2fULMUv7yTwy4agkNObBGiyyGVahi78ed",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| 32.288462 | 80 | 0.495533 | ["MIT"] | Eversmann85/project | config/settings/test.py | 1,679 | Python
#!/usr/bin/env python
import sys, os
import itertools, operator
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def reader(myfile):
t = np.array([])
print(myfile)
with open(myfile) as f:
lines = f.readlines()
for line in lines:
parts = line.split(" ")
if(len(parts)>1):
t=np.append(t,float(parts[7]))
f.close()
return t
def tuples_by_dispatch_width(tuples):
ret = []
tuples_sorted = sorted(tuples, key=operator.itemgetter(0))
for key,group in itertools.groupby(tuples_sorted,operator.itemgetter(0)):
ret.append((key, zip(*map(lambda x: x[1:], list(group)))))
return ret
def printgraphs(results_tuples,filename,title):
global_ws = [1,2,4,8,16,32,64]
plt.clf()
plt.cla()
markers = ['.', 'o', 'v', '*', 'D']
fig = plt.figure()
plt.grid(True)
plt.title(title)
ax = plt.subplot(111)
ax.set_xlabel("$Threads$")
ax.set_ylabel("$Throughput(Mops/sec)$")
i = 0
c="b"
tuples_by_dw = tuples_by_dispatch_width(results_tuples)
for tuple in tuples_by_dw:
dw = tuple[0]
ws_axis = tuple[1][0]
ipc_axis = tuple[1][1]
x_ticks = np.arange(0, len(global_ws))
x_labels = map(str, global_ws)
ax.xaxis.set_ticks(x_ticks)
ax.xaxis.set_ticklabels(x_labels)
#ax.yaxis.set_ticks(np.arange(0,210,10))
print(x_ticks)
print(ipc_axis)
if(i==1): c="r"
ax.plot(x_ticks, ipc_axis, label="Configuration "+str(dw), marker=markers[i%len(markers)],color=c)
i = i + 1
lgd = ax.legend(ncol=len(tuples_by_dw), bbox_to_anchor=(0.75, -0.15), prop={'size':8})
plt.savefig(filename, bbox_extra_artists=(lgd,), bbox_inches='tight')
def lastplotter(t1,t2):
results_tuples = []
results_tuples.append((1,1,t1[0]))
results_tuples.append((1,2,t1[1]))
results_tuples.append((1,4,t1[2]))
results_tuples.append((1,8,t1[3]))
results_tuples.append((1,16,t1[4]))
results_tuples.append((1,32,t1[5]))
results_tuples.append((1,64,t1[6]))
results_tuples.append((2,1,t2[0]))
results_tuples.append((2,2,t2[1]))
results_tuples.append((2,4,t2[2]))
results_tuples.append((2,8,t2[3]))
results_tuples.append((2,16,t2[4]))
results_tuples.append((2,32,t2[5]))
results_tuples.append((2,64,t2[6]))
return results_tuples
t1 = reader('part11')
t2 = reader('part12')
t3 = reader('part21')
t4 = reader('part22')
print("Done reading files,now let's plot em!")
#print(t1,t2)
# uncomment in order to print line plots
res1 = lastplotter(t1,t2)
printgraphs(res1,'naive_bank.png','Bank accounts 1')
res2 = lastplotter(t3,t4)
printgraphs(res2,'padded_bank.png','Bank accounts 2')
| 22.025862 | 100 | 0.688845 | [
"MIT"
] | filmnoirprod/parallel_processing | ex3/z1/graph.py | 2,555 | Python |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
content = content.encode('utf-8') if isinstance(content,
unicode) else str(
content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
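# Illustrative example of what prediction_helper produces (keys follow the
# BigQuery schema defined below; the values are made up):
#   input message:  '{"id": "1", "text": "great day", "user_id": "42", ...}'
#   output element: {"id": "1", "text": "great day", "user_id": "42", ...,
#                    "sentiment": 0.8}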
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
help='BigQuery OutPut table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
help='Min batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
| 36.306338 | 135 | 0.626612 | [
"Apache-2.0"
] | dlminvestments/ai-platform-samples | notebooks/samples/tensorflow/sentiment_analysis/dataflow/PubSubToBigQueryWithAPI.py | 10,311 | Python |
#https://docs.pymc.io/notebooks/GLM-hierarchical-binominal-model.html
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
import pandas as pd
#import seaborn as sns
import pymc3 as pm
import arviz as az
import theano.tensor as tt
np.random.seed(123)
# rat data (BDA3, p. 102)
y = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
15, 9, 4
])
n = np.array([
20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
47, 24, 14
])
N = len(n)
def logp_ab(value):
    '''Log of the improper hyperprior p(alpha, beta) proportional to (alpha + beta)**(-5/2).'''
return tt.log(tt.pow(tt.sum(value), -5/2))
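# Illustrative check: for alpha + beta = 2 the log-prior is -2.5 * log(2) ~= -1.733.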
with pm.Model() as model:
# Uninformative prior for alpha and beta
ab = pm.HalfFlat('ab',
shape=2,
testval=np.asarray([1., 1.]))
pm.Potential('p(a, b)', logp_ab(ab))
alpha = pm.Deterministic('alpha', ab[0])
beta = pm.Deterministic('beta', ab[1])
X = pm.Deterministic('X', tt.log(ab[0]/ab[1]))
Z = pm.Deterministic('Z', tt.log(tt.sum(ab)))
theta = pm.Beta('theta', alpha=ab[0], beta=ab[1], shape=N)
p = pm.Binomial('y', p=theta, observed=y, n=n)
#trace = pm.sample(1000, tune=2000, target_accept=0.95)
trace = pm.sample(1000, tune=500, cores=1)
#az.plot_trace(trace)
#plt.savefig('../figures/hbayes_binom_rats_trace.png', dpi=300)
print(az.summary(trace))
J = len(n)
post_mean = np.zeros(J)
samples = trace[theta]
post_mean = np.mean(samples, axis=0)
print('post mean')
print(post_mean)
alphas = trace['alpha']
betas = trace['beta']
alpha_mean = np.mean(alphas)
beta_mean = np.mean(betas)
hyper_mean = alpha_mean/(alpha_mean + beta_mean)
print('hyper mean')
print(hyper_mean)
mle = y / n
pooled_mle = np.sum(y) / np.sum(n)
print('pooled mle')
print(pooled_mle)
axes = az.plot_forest(
trace, var_names='theta', credible_interval=0.95, combined=True, colors='cycle')
y_lims = axes[0].get_ylim()
axes[0].vlines(hyper_mean, *y_lims)
plt.savefig('../figures/hbayes_binom_rats_forest95.pdf', dpi=300)
J = len(n)
fig, axs = plt.subplots(4,1, figsize=(10,10))
plt.subplots_adjust(hspace=0.3)
axs = np.reshape(axs, 4)
xs = np.arange(J)
ax = axs[0]
ax.bar(xs, y)
ax.set_title('number of postives')
ax = axs[1]
ax.bar(xs, n)
ax.set_title('popn size')
ax = axs[2]
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
ax = axs[3]
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
plt.savefig('../figures/hbayes_binom_rats_barplot.pdf', dpi=300)
J = len(n)
xs = np.arange(J)
fig, ax = plt.subplots(1,1)
ax.bar(xs, y)
ax.set_title('number of postives')
plt.savefig('../figures/hbayes_binom_rats_outcomes.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, n)
ax.set_title('popn size')
plt.savefig('../figures/hbayes_binom_rats_popsize.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, mle)
ax.set_ylim(0, 0.5)
ax.hlines(pooled_mle, 0, J, 'r', lw=3)
ax.set_title('MLE (red line = pooled)')
plt.savefig('../figures/hbayes_binom_rats_MLE.pdf', dpi=300)
fig, ax = plt.subplots(1,1)
ax.bar(xs, post_mean)
ax.hlines(hyper_mean, 0, J, 'r', lw=3)
ax.set_ylim(0, 0.5)
ax.set_title('posterior mean (red line = hparam)')
plt.savefig('../figures/hbayes_binom_rats_postmean.pdf', dpi=300)
| 26.103448 | 84 | 0.620872 | [
"MIT"
] | NamDinhRobotics/pyprobml | scripts/hbayes_binom_rats_pymc3.py | 3,785 | Python |
import os
from os.path import join
from ...utils import remove
from .. import run_nbgrader
from .base import BaseTestApp
class TestNbGraderExport(BaseTestApp):
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["export", "--help-all"])
def test_export(self, db, course_dir):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [
dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST'),
dict(name='ps2', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps2", "p1.ipynb"))
run_nbgrader(["assign", "ps1", "--db", db])
run_nbgrader(["assign", "ps2", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "bar", "ps1", "p1.ipynb"))
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps2", "p1.ipynb"))
run_nbgrader(["autograde", "ps1", "--db", db])
run_nbgrader(["autograde", "ps2", "--db", db])
run_nbgrader(["export", "--db", db])
assert os.path.isfile("grades.csv")
with open("grades.csv", "r") as fh:
contents = fh.readlines()
assert len(contents) == 5
run_nbgrader(["export", "--db", db, "--to", "mygrades.csv"])
assert os.path.isfile("mygrades.csv")
remove("grades.csv")
run_nbgrader(["export", "--db", db, "--exporter", "nbgrader.plugins.CsvExportPlugin"])
assert os.path.isfile("grades.csv")
run_nbgrader(["export", "--db", db, "--exporter=nbgrader.tests.apps.files.myexporter.MyExporter", "--to", "foo.txt"])
assert os.path.isfile("foo.txt")
| 43.531915 | 125 | 0.600684 | [
"BSD-3-Clause-Clear"
] | FrattisUC/nbgrader | nbgrader/tests/apps/test_nbgrader_export.py | 2,046 | Python |
#MenuTitle: Set Transform Origin
# -*- coding: utf-8 -*-
__doc__="""
Sets origin point for Rotate tool.
"""
import vanilla
class SetTransformOriginWindow( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 370
windowHeight = 60
windowWidthResize = 0 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Set Transform Origin", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "com.mekkablue.SetTransformOriginWindow.mainwindow" # stores last window position and size
)
# UI elements:
self.w.text_1 = vanilla.TextBox( (15-1, 12+3, 75, 14), "Origin:", sizeStyle='small' )
self.w.originX = vanilla.EditText( (65, 12, 70, 15+3), "0.0", sizeStyle = 'small')
self.w.originY = vanilla.EditText( (65+80, 12, 70, 15+3), "0.0", sizeStyle = 'small')
# Run Button:
self.w.resetButton = vanilla.Button((65+160, 12+1, 60, 15), "Get", sizeStyle='small', callback=self.GetTransformOrigin )
self.w.runButton = vanilla.Button((65+160+70, 12+1, 60, 15), "Set", sizeStyle='small', callback=self.SetTransformOriginMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.GetTransformOrigin(None):
print "Note: 'Set Transform Origin' could not load preferences. Will resort to defaults"
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def GetTransformOrigin( self, sender ):
try:
myController = Glyphs.currentDocument.windowController()
rotateToolClass = NSClassFromString("GlyphsToolRotate")
myRotateTool = myController.toolForClass_( rotateToolClass )
currentOrigin = myRotateTool.transformOrigin()
self.w.originX.set( str(currentOrigin.x) )
self.w.originY.set( str(currentOrigin.y) )
except:
return False
return True
def SetTransformOriginMain( self, sender ):
try:
newOriginX = float(self.w.originX.get())
newOriginY = float(self.w.originY.get())
newOriginPoint = NSPoint( newOriginX, newOriginY )
myController = Glyphs.currentDocument.windowController()
myController.graphicView().setNeedsDisplay_(False)
rotateToolClass = NSClassFromString("GlyphsToolRotate")
myRotateTool = myController.toolForClass_( rotateToolClass )
myRotateTool.setTransformOrigin_( newOriginPoint )
myController.graphicView().setNeedsDisplay_(True)
except Exception, e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print "Set Transform Origin Error: %s" % e
SetTransformOriginWindow()
| 35.769231 | 127 | 0.719713 | [
"Apache-2.0"
] | eliheuer/Glyphs-Scripts | Paths/Set Transform Origin.py | 2,790 | Python |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Truncated Cauchy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import sigmoid as sigmoid_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.math import numeric
from tensorflow_probability.python.math import special as tfp_math
__all__ = [
'TruncatedCauchy',
]
def _cauchy_cdf_diff(x, y):
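  # Probability mass of a standard Cauchy between y and x: since the standard
  # Cauchy CDF is 0.5 + arctan(t) / pi, the difference of CDFs reduces to
  # (arctan(x) - arctan(y)) / pi, computed here via atan_difference.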
return tfp_math.atan_difference(x, y) / np.pi
class TruncatedCauchy(distribution.Distribution):
"""The Truncated Cauchy distribution.
The truncated Cauchy is a Cauchy distribution bounded between `low`
and `high` (the pdf is 0 outside these bounds and renormalized).
Samples from this distribution are differentiable with respect to `loc`
and `scale`, but not with respect to the bounds `low` and `high`.
### Mathematical Details
The probability density function (pdf) of this distribution is:
```none
pdf(x; loc, scale, low, high) =
{ 1 / (pi * scale * (1 + z**2) * A) for low <= x <= high
{ 0 otherwise
z = (x - loc) / scale
A = CauchyCDF((high - loc) / scale) - CauchyCDF((low - loc) / scale)
```
where:
* `CauchyCDF` is the cumulative density function of the Cauchy distribution
with 0 mean and unit variance.
This is a scalar distribution so the event shape is always scalar and the
dimensions of the parameters define the batch_shape.
#### Examples
```python
tfd = tfp.distributions
# Define a batch of two scalar TruncatedCauchy distributions with modes
# at 0. and 1.0 .
dist = tfd.TruncatedCauchy(loc=[0., 1.], scale=1.,
low=[-1., 0.],
high=[1., 1.])
# Evaluate the pdf of the distributions at 0.5 and 0.8 respectively returning
# a 2-vector tensor.
dist.prob([0.5, 0.8])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
"""
def __init__(self,
loc,
scale,
low,
high,
validate_args=False,
allow_nan_stats=True,
name='TruncatedCauchy'):
"""Construct a TruncatedCauchy.
All parameters of the distribution will be broadcast to the same shape,
so the resulting distribution will have a batch_shape of the broadcast
shape of all parameters.
Args:
loc: Floating point tensor; the modes of the corresponding non-truncated
Cauchy distribution(s).
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
low: `float` `Tensor` representing lower bound of the distribution's
support. Must be such that `low < high`.
high: `float` `Tensor` representing upper bound of the distribution's
support. Must be such that `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked at run-time.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value '`NaN`' to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, low, high], tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
self._scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
self._low = tensor_util.convert_nonref_to_tensor(
low, name='low', dtype=dtype)
self._high = tensor_util.convert_nonref_to_tensor(
high, name='high', dtype=dtype)
dtype_util.assert_same_float_dtype(
[self._loc, self._scale, self._low, self._high])
super(TruncatedCauchy, self).__init__(
dtype=dtype,
# Samples do not have gradients with respect to `_low` and `_high`.
# TODO(b/161297284): Implement these gradients.
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def _loc_scale_low_high(self, loc=None, scale=None, low=None, high=None):
loc = tf.convert_to_tensor(self.loc if loc is None else loc)
scale = tf.convert_to_tensor(self.scale if scale is None else scale)
low = tf.convert_to_tensor(self.low if low is None else low)
high = tf.convert_to_tensor(self.high if high is None else high)
return loc, scale, low, high
def _standardized_low_and_high(self,
loc=None,
scale=None,
low=None,
high=None):
loc, scale, low, high = self._loc_scale_low_high(
loc=loc, scale=scale, low=low, high=high)
return (low - loc) / scale, (high - loc) / scale
def _normalizer(self,
loc=None,
scale=None,
low=None,
high=None,
std_low=None,
std_high=None):
if std_low is None or std_high is None:
std_low, std_high = self._standardized_low_and_high(
loc=loc, scale=scale, low=low, high=high)
return _cauchy_cdf_diff(std_high, std_low)
def _log_normalizer(self,
loc=None,
scale=None,
low=None,
high=None,
std_low=None,
std_high=None):
return tf.math.log(self._normalizer(
loc=loc,
scale=scale,
low=low,
high=high,
std_low=std_low,
std_high=std_high))
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
low=parameter_properties.ParameterProperties(),
# TODO(b/169874884): Support decoupled parameterization.
high=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED,))
# pylint: enable=g-long-lambda
@property
def loc(self):
return self._loc
@property
def scale(self):
return self._scale
@property
def low(self):
return self._low
@property
def high(self):
return self._high
def _batch_shape(self):
return functools.reduce(
tf.broadcast_static_shape,
(self.loc.shape, self.scale.shape, self.low.shape, self.high.shape))
def _batch_shape_tensor(self, loc=None, scale=None, low=None, high=None):
return functools.reduce(
ps.broadcast_shape,
(ps.shape(self.loc if loc is None else loc),
ps.shape(self.scale if scale is None else scale),
ps.shape(self.low if low is None else low),
ps.shape(self.high if high is None else high)))
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
loc, scale, low, high = self._loc_scale_low_high()
batch_shape = self._batch_shape_tensor(
loc=loc, scale=scale, low=low, high=high)
sample_and_batch_shape = ps.concat([[n], batch_shape], axis=0)
u = samplers.uniform(sample_and_batch_shape, dtype=self.dtype, seed=seed)
return self._quantile(u, loc=loc, scale=scale, low=low, high=high)
def _log_prob(self, x):
loc, scale, low, high = self._loc_scale_low_high()
log_prob = (
-tf.math.log1p(tf.square((x - loc) / scale))
- (np.log(np.pi) + tf.math.log(scale))
- self._log_normalizer(loc=loc, scale=scale, low=low, high=high))
# p(x) is 0 outside the bounds.
return tf.where((x > high) | (x < low),
dtype_util.as_numpy_dtype(x.dtype)(-np.inf),
log_prob)
def _cdf(self, x):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
return tf.clip_by_value(
((_cauchy_cdf_diff((x - loc) / scale, std_low))
/ self._normalizer(std_low=std_low, std_high=std_high)),
clip_value_min=0., clip_value_max=1.)
def _log_cdf(self, x):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
return (
tf.math.log(_cauchy_cdf_diff((x - loc) / scale, std_low))
- self._log_normalizer(std_low=std_low, std_high=std_high))
def _mean(self):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Formula from David Olive, "Applied Robust Statistics" --
# see http://parker.ad.siu.edu/Olive/ch4.pdf .
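    # Illustrative special case: for bounds symmetric about loc (std_high ==
    # -std_low) the two log1p terms cancel, so t == 0 and the mean is just loc.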
t = (tf.math.log1p(tf.math.square(std_high))
- tf.math.log1p(tf.math.square(std_low)))
t = t / (2 * tfp_math.atan_difference(std_high, std_low))
return loc + scale * t
def _mode(self):
# mode = { loc: for low <= loc <= high
# low: for loc < low
# high: for loc > high
# }
loc = tf.convert_to_tensor(self.loc)
low = tf.convert_to_tensor(self.low)
high = tf.convert_to_tensor(self.high)
shape = self._batch_shape_tensor(loc=loc, low=low, high=high)
# We *must* broadcast with scale to get a correctly shaped output, but
# TODO(b/141460015): we should not have to explicitly broadcast the first
# parameter to clip_by_value to align with the second and third parameters.
return tf.clip_by_value(tf.broadcast_to(loc, shape), low, high)
def _variance(self):
loc, scale, low, high = self._loc_scale_low_high()
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Formula from David Olive, "Applied Robust Statistics" --
# see http://parker.ad.siu.edu/Olive/ch4.pdf .
atan_diff = tfp_math.atan_difference(std_high, std_low)
t = (std_high - std_low - atan_diff) / atan_diff
std_mean = ((tf.math.log1p(tf.math.square(std_high))
- tf.math.log1p(tf.math.square(std_low))) / (2 * atan_diff))
return tf.math.square(scale) * (t - tf.math.square(std_mean))
def _quantile(self, p, loc=None, scale=None, low=None, high=None):
loc, scale, low, high = self._loc_scale_low_high(loc, scale, low, high)
std_low, std_high = self._standardized_low_and_high(
low=low, high=high, loc=loc, scale=scale)
# Use the sum of tangents formula.
# First, the quantile of the cauchy distribution is tan(pi * (x - 0.5)).
# and the cdf of the cauchy distribution is 0.5 + arctan(x) / np.pi
# WLOG, we will assume loc = 0 , scale = 1 (these can be taken in to account
# by rescaling and shifting low and high, and then scaling the output).
# We would like to compute quantile(p * (cdf(high) - cdf(low)) + cdf(low))
# This is the same as:
# tan(pi * (cdf(low) + (cdf(high) - cdf(low)) * p - 0.5))
# Let a = pi * (cdf(low) - 0.5), b = pi * (cdf(high) - cdf(low)) * u
# By using the formula for the cdf we have:
# a = arctan(low), b = arctan_difference(high, low) * u
# Thus the quantile is now tan(a + b).
# By appealing to the sum of tangents formula we have:
# tan(a + b) = (tan(a) + tan(b)) / (1 - tan(a) * tan(b)) =
# (low + tan(b)) / (1 - low * tan(b))
# Thus for a 'standard' truncated cauchy we have the quantile as:
# quantile(p) = (low + tan(b)) / (1 - low * tan(b)) where
# b = arctan_difference(high, low) * p.
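    # Illustrative sanity check (not from the original source): with loc=0,
    # scale=1, low=-1, high=1 and p=0.5, we get std_low=-1, std_high=1,
    # atan_difference(1, -1) = pi/2, so tanb = tan(pi/4) = 1 and the quantile is
    # (-1 + 1) / (1 - (-1) * 1) = 0, the symmetric midpoint, as expected.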
tanb = tf.math.tan(tfp_math.atan_difference(std_high, std_low) * p)
x = (std_low + tanb) / (1 - std_low * tanb)
# Clip the answer to prevent it from falling numerically outside
# the support.
return numeric.clip_by_value_preserve_gradient(
x * scale + loc, clip_value_min=low, clip_value_max=high)
def _default_event_space_bijector(self):
return sigmoid_bijector.Sigmoid(
low=self.low, high=self.high, validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
low = None
high = None
if is_init != tensor_util.is_ref(self.low):
low = tf.convert_to_tensor(self.low)
assertions.append(
assert_util.assert_finite(low, message='`low` is not finite'))
if is_init != tensor_util.is_ref(self.high):
high = tf.convert_to_tensor(self.high)
assertions.append(
assert_util.assert_finite(high, message='`high` is not finite'))
if is_init != tensor_util.is_ref(self.loc):
assertions.append(
assert_util.assert_finite(self.loc, message='`loc` is not finite'))
if is_init != tensor_util.is_ref(self.scale):
scale = tf.convert_to_tensor(self.scale)
assertions.extend([
assert_util.assert_positive(
scale, message='`scale` must be positive'),
assert_util.assert_finite(scale, message='`scale` is not finite'),
])
if (is_init != tensor_util.is_ref(self.low) or
is_init != tensor_util.is_ref(self.high)):
low = tf.convert_to_tensor(self.low) if low is None else low
high = tf.convert_to_tensor(self.high) if high is None else high
assertions.append(
assert_util.assert_greater(
high,
low,
message='TruncatedCauchy not defined when `low >= high`.'))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_greater_equal(
x, self.low, message='Sample must be greater than or equal to `low`.'))
assertions.append(assert_util.assert_less_equal(
x, self.high, message='Sample must be less than or equal to `high`.'))
return assertions
| 39.939394 | 81 | 0.655223 | [
"Apache-2.0"
] | jeffpollock9/probability | tensorflow_probability/python/distributions/truncated_cauchy.py | 15,816 | Python |
# http://rosalind.info/problems/mmch/
from math import factorial
def nPr(n, k):
'''Returns the number of k-permutations of n.'''
    return factorial(n) // factorial(n-k)
f = open("rosalind_mmch.txt", "r")
dnas = {}
currentKey = ''
for content in f:
# Beginning of a new sample
if '>' in content:
key = content.rstrip().replace('>', '')
currentKey = key
dnas[currentKey] = ''
else:
dnas[currentKey] += content.rstrip()
string = dnas[currentKey]
nbAU = [string.count(c) for c in 'AU']
nbGC = [string.count(c) for c in 'GC']
# For each complementary pair (A-U and G-C), a maximum matching pairs every base
# of the scarcer letter, which can be done in nPr(max, min) ways.
# The total number of maximum matchings is then the product of the two counts.
maxNbMatchings = nPr(max(nbAU), min(nbAU)) * nPr(max(nbGC), min(nbGC))
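# Illustrative check on a toy string (not a Rosalind input): for "AUAU" we get
# nbAU = [2, 2] and nbGC = [0, 0], so the count is nPr(2, 2) * nPr(0, 0) = 2.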
print(maxNbMatchings)
| 25.4 | 70 | 0.635171 | [
"MIT"
] | AntoineAugusti/katas | rosalind/mmch.py | 762 | Python |
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Nick Bastin <[email protected]>
# Copyright (c) 2015 Michael Kefeder <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Stephane Wirtel <[email protected]>
# Copyright (c) 2015 Cosmin Poieana <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016, 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Elias Dorneles <[email protected]>
# Copyright (c) 2016 Yannack <[email protected]>
# Copyright (c) 2016 Alex Jurkiewicz <[email protected]>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2017, 2019-2021 hippo91 <[email protected]>
# Copyright (c) 2017 danields <[email protected]>
# Copyright (c) 2017 Jacques Kvam <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018-2019 Nick Drozd <[email protected]>
# Copyright (c) 2018-2019 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Sergei Lebedev <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Natalie Serebryakova <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 SergeyKosarchuk <[email protected]>
# Copyright (c) 2018 Steven M. Vascellaro <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Chris Lamb <[email protected]>
# Copyright (c) 2018 glmdgrielson <[email protected]>
# Copyright (c) 2019 Daniel Draper <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Niko Wenselowski <[email protected]>
# Copyright (c) 2019 Nikita Sobolev <[email protected]>
# Copyright (c) 2019 Oisín Moran <[email protected]>
# Copyright (c) 2019 Fantix King <[email protected]>
# Copyright (c) 2020 Peter Kolbus <[email protected]>
# Copyright (c) 2020 ethan-leba <[email protected]>
# Copyright (c) 2020 へーさん <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Ram Rachum <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2020 bernie gray <[email protected]>
# Copyright (c) 2020 Gabriel R Sezefredo <[email protected]>
# Copyright (c) 2020 Benny <[email protected]>
# Copyright (c) 2020 Anubhav <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Andreas Finkler <[email protected]>
# Copyright (c) 2021 Or Bahari <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
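# Illustrative (not exhaustive): under SnakeCaseStyle, DEFAULT_NAME_RGX accepts
# names such as "my_func", "_private" or "__init__", and rejects "myFunc" as
# well as two-letter names like "ab".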
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
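# Typical code shape this helper targets (module name is made up):
#     try:
#         from fastimpl import loads
#     except ImportError:
#         loads = None   # assignment redefining the name imported above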
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns true if a loop may ends up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may ends up in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
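# Illustrative: a loop such as
#     for item in items:
#         if item:
#             break
# "exits early"; a break that belongs to a nested loop (or to a function
# defined inside the loop body) does not count.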
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
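    # --- Editor's illustration (not part of pylint) -------------------------
    # A loop whose body contains no `break` makes its `else` clause pointless
    # (names are made up):
    #
    #     for item in items:
    #         process(item)
    #     else:               # useless-else-on-loop: always runs, no break above
    #         cleanup()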
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
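    # --- Editor's illustration (not part of pylint) -------------------------
    # Simplest case flagged by _check_redefinition (names are made up):
    #
    #     def load(path): ...
    #     def load(path): ...      # function-redefined
    #
    # Redefinitions guarded by `if not load:` or `if load is None:` and
    # `@typing.overload` stubs are exempted by the code above.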
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statements with a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        # Note: parentheses are required here; without them the expression
        # would be parsed as `defaults or ([] + kw_defaults) or []`.
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
        # 1 - Does it have a right sibling?
self._check_unreachable(node)
        # 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
            # we are unsure about the inferred type of the node, so just check
            # whether format was called on the result of print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
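    # --- Editor's illustration (not part of pylint) -------------------------
    # Example flagged by visit_dict (the key name is made up):
    #
    #     settings = {"timeout": 30, "timeout": 60}   # duplicate-key: "timeout"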
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If a parent node whose type is in breaker_classes is found before a
        try...finally block, the whole check is skipped."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
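    # --- Editor's illustration (not part of pylint) -------------------------
    # A return or break in a finally clause silently discards any in-flight
    # exception (names are made up):
    #
    #     def read_config(path):
    #         try:
    #             return parse(path)
    #         finally:
    #             return {}        # lost-exception: swallows parse() errors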
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
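    # --- Editor's illustration (not part of pylint) -------------------------
    # Simplest case flagged above (the name is made up):
    #
    #     result = result        # self-assigning-variable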
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
            # variables introduced via `global` aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
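    # --- Editor's illustration (not part of pylint) -------------------------
    # With the default naming styles and bad-names list (names are made up):
    #
    #     class data_loader:      # invalid-name: class names use PascalCase
    #         ...
    #     foo = 3                 # disallowed-name: "foo" is in bad-names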
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
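    # --- Editor's illustration (not part of pylint) -------------------------
    # Sketch of what _check_docstring reports (names are made up):
    #
    #     def compute_total(values):     # missing-function-docstring
    #         return sum(values)
    #
    #     def helper():
    #         """ """                    # empty-docstring
    #
    # Names matching no-docstring-rgx and bodies shorter than
    # docstring-min-length are exempt, as handled above.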
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
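    # --- Editor's illustration (not part of pylint) -------------------------
    # Comparisons rewritten by the suggestions built above (names are made up):
    #
    #     if flag == True:       # singleton-comparison -> `flag is True`, or
    #         ...                # just `flag` when testing truthiness
    #     if result != None:     # singleton-comparison -> `result is not None`
    #         ...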
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
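    # --- Editor's illustration (not part of pylint) -------------------------
    # NaN never compares equal to anything, including itself (name is made up):
    #
    #     if value == float("nan"):    # nan-comparison -> use math.isnan(value)
    #         ...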
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when a bare callable is compared
        # with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
| 38.534714 | 118 | 0.576528 | [
"MIT"
] | DiegoSilvaHoffmann/Small-Ecommerce | venv/lib/python3.8/site-packages/pylint/checkers/base.py | 100,471 | Python |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @Time : 2020-01-16 15:53
# @Author : 行颠
# @Email : [email protected]
# @File : view
# @Software: view
# @DATA : 2020-01-16
"""
import os
import asyncio
import motor.motor_asyncio
import aiohttp
from aiohttp import web
import aiohttp_jinja2
import json
import jinja2
from apps.config import *
import subprocess, shlex
from apps.ansible_config import get_ansible_hosts_data
@aiohttp_jinja2.template('package/package.html')
async def handler_package(request):
return {'name': 'Andrew', 'age': 'Svetlov'}
@aiohttp_jinja2.template('package/workflow.html')
async def handler_package_workflow(request):
return {'name': 'Andrew', 'age': 'Svetlov'}
async def handler_package_workflow_roles(request):
post = await request.post()
result = read_yaml_file(post['path'])
roles = result[0]['roles']
return web.json_response(roles)
async def handler_package_ansible_list(request):
dir_path = request.app['settings'].ansible_package_workspace
result = get_tree(dir_path)
return web.json_response(result)
async def handler_package_ansible_read(request):
post = await request.post()
data = read_t_file(post['filename'])
return web.json_response({"code": 1, "data": data})
async def handler_package_ansible_write(request):
post = await request.post()
data = write_t_file(post['path'], post['code'])
return web.json_response({"code": 1, "data": data})
async def handler_package_ws_ansible_run(request):
ws = web.WebSocketResponse()
cmdb = request.app['cmdb']
await ws.prepare(request)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
else:
msg_data = json.loads(msg.data)
params = []
query_data = json.loads(msg_data['query'])
for i in query_data['nodeDataArray']:
if i.get("role", None) and i.get("data", None):
params.append(i.get("data"))
await ws.send_str(">> {} \r\n".format(json.dumps({"$or": params})))
# await ws.send_json({"$or":params})
import pprint
pprint.pprint({"$or": params})
result = cmdb.assets.find({"$or": params})
hosts = []
for host in await result.to_list(length=1000):
hosts.append(host)
hosts_file = get_ansible_hosts_data(hosts)
work_path = os.path.dirname(os.path.abspath(msg_data['path']))
command = "ansible -i {} all -m ping".format(hosts_file)
await ws.send_str(">> {} \r\n".format(command))
await ws.send_str(">> {} \r\n".format("result:"))
                    # non-blocking pause so the event loop isn't stalled
                    await asyncio.sleep(1)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=work_path)
                    # Stream the subprocess output in real time
                    while p.poll() is None:
                        out = p.stdout.readline().strip()
                        # stderr is merged into stdout above (stderr=subprocess.STDOUT),
                        # so there is no separate error stream to read here.
                        if out:
                            # print("sub process output: ", out)
                            await ws.send_str(out.decode(encoding='utf-8', errors='strict'))
                    # Subprocess return code
await ws.send_str("return code: {} ".format(p.returncode))
return ws
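# --- Editor's note (illustrative only) ----------------------------------------
# Sketch of the websocket payload this handler expects; every concrete value
# below is an assumption, only the key names come from the code above:
#
#     ws.send_str(json.dumps({
#         "path": "/opt/ansible/workspace/site.yml",
#         "query": json.dumps({"nodeDataArray": [
#             {"role": "web", "data": {"hostname": "web-01"}},
#         ]}),
#     }))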
| 27 | 88 | 0.567721 | [
"Apache-2.0"
] | ansible-ui/ansible-ui | apps/package/view.py | 3,727 | Python |
class ATM():
def __init__(self,balance,bankname):
self.balance = balance
self.bankname=bankname
def draw_money(self,request):
print "="*30 +"\nWelcome To " + self.bankname + "\n" + "="*30
if request > self.balance :
print "Low Account Credit"
else:
print "Current Balance is:" + str(self.balance)
self.balance-=request
while request>0 :
if request >= 100:
print "give 100"
request = request -100
elif request>=50:
print "give 50"
request -= 50
elif request >= 10:
print "give 10"
request -= 10
elif request >= 5:
print "give 5"
request -= 5
else:
print "give 1"
request -= 1
print "Balance after withdraw is:" + str(self.balance)
balance1 = 500
balance2 = 1000
atm1 = ATM(balance1,"Smart Bank")
atm2 = ATM(balance2,"Baraka Bank")
| 30.324324 | 69 | 0.459002 | [
"MIT"
] | MoustafaAscoura/wireframing_example | atmsolution/atm - with classes.py | 1,122 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for storing inactive and deprecated tables from the CAL-ACCESS database.
"""
from __future__ import unicode_literals
# Models
from django.db import models
from calaccess_raw import fields
from .base import CalAccessBaseModel
from django.utils.encoding import python_2_unicode_compatible
# Annotations
from calaccess_raw import annotations
from calaccess_raw.annotations import DocumentCloud
@python_2_unicode_compatible
class BallotMeasuresCd(CalAccessBaseModel):
"""
Ballot-measure dates and times.
"""
UNIQUE_KEY = "FILER_ID"
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=7),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=19),
]
election_date = fields.DateField(
db_column='ELECTION_DATE',
null=True,
help_text="Ballot measure election date"
)
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
measure_no = fields.CharField(
db_column='MEASURE_NO',
max_length=2,
help_text="Ballot measure number"
)
measure_name = fields.CharField(
db_column='MEASURE_NAME',
max_length=163,
help_text="Ballot measure full name"
)
measure_short_name = fields.CharField(
db_column='MEASURE_SHORT_NAME',
max_length=50,
blank=True,
help_text="Ballot measure short name"
)
jurisdiction = fields.CharField(
db_column='JURISDICTION',
max_length=9,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'BALLOT_MEASURES_CD'
ordering = (
"-election_date",
"measure_no",
"measure_short_name",
"measure_name"
)
def __str__(self):
return self.measure_name
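    # --- Editor's note (illustrative only) ----------------------------------
    # Assuming the default Django manager is available on this model, a query
    # might look like the following (the filter values are assumptions):
    #
    #     props_2016 = BallotMeasuresCd.objects.filter(
    #         election_date__year=2016,
    #     ).order_by("measure_no")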
@python_2_unicode_compatible
class CvrF470Cd(CalAccessBaseModel):
"""
The cover page for officeholder and candidate short and supplemental forms.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"REC_TYPE",
"FORM_TYPE",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=8),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=30, end_page=32),
DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=15, end_page=16),
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29, end_page=30),
]
FILING_FORMS = [
annotations.get_form('F470'),
]
amend_id = fields.IntegerField(
db_column="AMEND_ID",
db_index=True,
help_text="Amendment Identification number. A number of 0 is an original filing and 1 "
"to 999 amendments."
)
cand_adr1 = fields.CharField(
db_column="CAND_ADR1",
blank=True,
max_length=55,
help_text="First line of the filer's street address."
)
cand_adr2 = fields.CharField(
db_column="CAND_ADR2",
blank=True,
max_length=55,
help_text="Second line of the filer's street address. "
)
cand_city = fields.CharField(
db_column="CAND_CITY",
blank=True,
max_length=30,
help_text="Candidate/Officeholder's City."
)
cand_email = fields.CharField(
db_column="CAND_EMAIL",
blank=True,
max_length=60,
help_text="Candidate/Officeholder's EMail address. Not required by the form."
)
cand_fax = fields.CharField(
db_column="CAND_FAX",
blank=True,
max_length=20,
help_text="Candidate/Officeholder's FAX Phone Number. Not required by the form."
)
cand_phon = fields.CharField(
db_column="CAND_PHON",
blank=True,
max_length=20,
help_text="Candidate/Officeholder's phone number."
)
cand_st = fields.CharField(
db_column="CAND_ST",
blank=True,
max_length=2,
help_text="Filer's State"
)
cand_zip4 = fields.CharField(
db_column="CAND_ZIP4",
blank=True,
max_length=10,
help_text="Filer's zipcode"
)
date_1000 = fields.DateField(
db_column="DATE_1000",
help_text="Date contributions totaling $1,000 or more. (For the 470-S)"
)
dist_no = fields.CharField(
db_column="DIST_NO",
blank=True,
max_length=3,
help_text="District number for the office being sought. Populated for Senate, Assembly, "
"or Board of Equalization races."
)
elect_date = fields.DateField(
db_column="ELECT_DATE",
help_text="Date of the general election. Required for filings in even years."
)
ENTITY_CD_CHOICES = (
('CAO', annotations.choices.CAMPAIGN_ENTITY_CODES['CAO']),
)
entity_cd = fields.CharField(
db_column="ENTITY_CD",
blank=True,
choices=ENTITY_CD_CHOICES,
max_length=3,
help_text="The filer's entity code. The value of this column will always be "
"Candidate/Office Holder (CAO) for this table.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29)
]
)
filer_id = fields.CharField(
db_column="FILER_ID",
blank=True,
max_length=9,
help_text="Filer's unique identification number."
)
filer_namf = fields.CharField(
db_column="FILER_NAMF",
blank=True,
max_length=45,
help_text="Filer's First Name(s) - required for individuals"
)
filer_naml = fields.CharField(
db_column="FILER_NAML",
blank=True,
max_length=200,
help_text="Filer's Last Name/Committee name"
)
filer_nams = fields.CharField(
db_column="FILER_NAMS",
blank=True,
max_length=10,
help_text="Filer's Name Suffix"
)
filer_namt = fields.CharField(
db_column="FILER_NAMT",
blank=True,
max_length=10,
help_text="The filer's prefix or title that preceeds their name if they are an individual."
)
filing_id = fields.IntegerField(
db_column="FILING_ID",
db_index=True,
help_text="Unique filing identification number."
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS])
form_type = fields.CharField(
db_column="FORM_TYPE",
choices=FORM_TYPE_CHOICES,
db_index=True,
max_length=4,
help_text="Type of Filing or Formset. The value of this column will always "
"be equal to F470.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
JURIS_CD_CHOICES = annotations.sort_choices(annotations.choices.JURIS_CODES)
juris_cd = fields.CharField(
db_column="JURIS_CD",
choices=JURIS_CD_CHOICES,
blank=True,
max_length=3,
help_text="Office Jurisdiction Code",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
juris_dscr = fields.CharField(
db_column="JURIS_DSCR",
blank=True,
max_length=40,
help_text="Office jurisdiction description text reqired if the jurisdiction code "
"(Juris_cd) is equal to CIT, CTY, LOC, or OTH."
)
OFF_S_H_CD_CHOICES = annotations.sort_choices(annotations.choices.OFF_S_H_CODES)
off_s_h_cd = fields.CharField(
db_column="OFF_S_H_CD",
choices=OFF_S_H_CD_CHOICES,
blank=True,
max_length=1,
help_text='Office Sought/Held code. Legal values are "S" for sought and "H" for held.',
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=30),
]
)
offic_dscr = fields.CharField(
db_column="OFFIC_DSCR",
blank=True,
max_length=40,
help_text="Office sought description used if the office code is other (OTH)."
)
OFFICE_CD_CODES = annotations.sort_choices(annotations.choices.OFFICE_CODES)
office_cd = fields.CharField(
db_column="OFFICE_CD",
choices=OFFICE_CD_CODES,
blank=True,
max_length=3,
help_text="Code that identifies the office being sought. See the CAL document for "
"a list of valid codes.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
REC_TYPE_CHOICES = (
('CVR', 'Cover Page'),
)
rec_type = fields.CharField(
db_column="REC_TYPE",
choices=REC_TYPE_CHOICES,
blank=True,
max_length=3,
help_text="Type of CAL record. This column will always contain CVR.",
documentcloud_pages=[
DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22),
DocumentCloud(id='2712034-Cal-Format-201', start_page=29),
]
)
report_num = fields.CharField(
db_column="REPORT_NUM",
blank=True,
max_length=3,
help_text="Report Number; 000 Original; 001-999 Amended as reported in the filing."
)
rpt_date = fields.DateField(
db_column="RPT_DATE",
db_index=True,
null=True,
help_text="Date this report is filed as reported by the filer."
)
def __str__(self):
return str(self.amend_id)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'CVR_F470_CD'
@python_2_unicode_compatible
class FilerTypePeriodsCd(CalAccessBaseModel):
"""
Undocumented.
The table's official description contains this note: "J M needs to document. This is
in his list of tables designed for future enhancements."
"""
UNIQUE_KEY = (
"ELECTION_TYPE",
"FILER_TYPE",
"PERIOD_ID",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=8),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=71),
]
ELECTION_TYPE_CHOICES = (
(0, 'N/A'),
(3001, 'GENERAL'),
(3002, 'PRIMARY'),
(3003, 'RECALL'),
(3004, 'SPECIAL ELECTION'),
(3005, 'OFFICEHOLDER'),
(3006, 'SPECIAL RUNOFF'),
)
election_type = fields.IntegerField(
db_column="ELECTION_TYPE",
db_index=True,
choices=ELECTION_TYPE_CHOICES,
help_text="Election type",
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=3, end_page=4),
],
)
filer_type = fields.ForeignKeyField(
'FilerTypesCd',
related_name='filing_type_periods',
db_constraint=False,
db_column="FILER_TYPE",
db_index=True,
help_text="Foreign key referencing FilerTypesCd.filer_type",
on_delete=models.CASCADE
)
period_id = fields.ForeignKeyField(
'FilingPeriodCd',
related_name='filing_type_periods',
db_constraint=False,
db_column="PERIOD_ID",
db_index=True,
help_text="Foreign key referencing FilingPeriodCd.period_id",
on_delete=models.CASCADE
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'FILER_TYPE_PERIODS_CD'
def __str__(self):
return str(self.election_type)
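# Illustrative reverse lookups enabled by the related_name values above
# (hypothetical instances; not part of the original module):
#
#   filer_type_row.filing_type_periods.all()   # from a FilerTypesCd instance
#   period_row.filing_type_periods.all()       # from a FilingPeriodCd instance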
@python_2_unicode_compatible
class LobbyistContributions1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS2_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
April 2001, so probably this table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id="2711614-CalAccessTablesWeb", start_page=10),
DocumentCloud(id="2711614-CalAccessTablesWeb", start_page=92, end_page=93),
]
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS1_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistContributions2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS3_CD.
According to "Cal-Access Tables, Columns, Indexes", this is a temporary
table used to generate the actual Lobbyist contribution disclosure table,
which is LOBBYIST_CONTRIBUTIONS3_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
April 2001, so probably this table is no longer in use.
"""
UNIQUE_KEY = False
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=10, end_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=93, end_page=94),
]
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS2_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistContributions3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of campaign contributions made by lobbyists.
This table is 95 percent identical to LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
According to "Cal-Access Tables, Columns, Indexes", this is the actual
Lobbyist contribution disclosure table generated from the other two
temporary tables: LOBBYIST_CONTRIBUTIONS1_CD and LOBBYIST_CONTRIBUTIONS2_CD.
    Also, the most recent values observed in FILING_PERIOD_START_DT are from
April 2001, so probably this table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=94),
]
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
null=True,
db_index=True,
help_text="Filer's unique identification number",
)
filing_period_start_dt = fields.DateField(
null=True,
db_column='FILING_PERIOD_START_DT',
verbose_name='Filing period start date',
help_text='Start date of filing period',
)
filing_period_end_dt = fields.DateField(
db_column='FILING_PERIOD_END_DT',
null=True,
verbose_name='Filing period end date',
help_text='End date of filing period',
)
contribution_dt = fields.CharField(
db_column='CONTRIBUTION_DT',
max_length=32,
blank=True,
verbose_name='Contribution date',
help_text='Date of contribution',
)
recipient_name = fields.CharField(
db_column='RECIPIENT_NAME',
max_length=106,
blank=True,
help_text="Recipient's name"
)
recipient_id = fields.IntegerField(
db_column='RECIPIENT_ID',
blank=True,
null=True,
help_text="Recipient's identification number"
)
amount = fields.FloatField(
db_column='AMOUNT',
blank=True,
null=True,
help_text="Amount received"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_CONTRIBUTIONS3_CD'
ordering = ("-filing_period_start_dt",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class LobbyistEmpLobbyist1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=94, end_page=95),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=17,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMP_LOBBYIST1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistEmpLobbyist2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMP_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=95),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=17,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMP_LOBBYIST2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistEmployer1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=97, end_page=98),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
choices=INTEREST_CD_CHOICES,
blank=True,
null=True,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
verbose_name="Interest name",
help_text="Interest name",
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployer2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=98, end_page=99),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
blank=True,
null=True,
choices=INTEREST_CD_CHOICES,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
help_text="Interest name"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter 1 total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter 2 total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter 3 total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter 4 total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter 5 total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter 6 total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter 7 total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter 8 total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployer3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is 99 percent identical to LOBBYIST_EMPLOYER1_CD and LOBBYIST_EMPLOYER2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the only value observed in SESSION_YR_1 is 1999 and the only value
observed in SESSION_YR_2 is 2000.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=99),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column='INTEREST_CD',
blank=True,
null=True,
choices=INTEREST_CD_CHOICES,
verbose_name="interest code",
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column='INTEREST_NAME',
max_length=24,
blank=True,
help_text="Interest name"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
verbose_name="Total amount of year 1 of the session",
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
verbose_name="Total amount of year 2 of the session",
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER3_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerFirms1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=95, end_page=96),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
termination_dt = fields.CharField(
verbose_name='termination date',
db_column='TERMINATION_DT',
max_length=32,
blank=True,
help_text="Termination effective date"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_FIRMS1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerFirms2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_EMPLOYER_FIRMS1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
    Also, the distinct SESSION_ID values span from 1995 to 2001, so probably this
table is no longer in use.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=96),
]
employer_id = fields.IntegerField(
db_column='EMPLOYER_ID',
help_text="Employer identification number",
verbose_name="Employer ID"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
termination_dt = fields.CharField(
verbose_name='termination date',
db_column='TERMINATION_DT',
max_length=32,
blank=True,
help_text="Termination effective date"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_FIRMS2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.employer_id)
@python_2_unicode_compatible
class LobbyistEmployerHistoryCd(CalAccessBaseModel):
"""
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_EMPLOYER1_CD, LOBBYIST_EMPLOYER2_CD and LOBBYIST_EMPLOYER3_CD
tables.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=96, end_page=97),
]
contributor_id = fields.IntegerField(
db_column="CONTRIBUTOR_ID",
help_text="Contributor identification number."
)
current_qtr_amt = fields.IntegerField(
db_column="CURRENT_QTR_AMT",
help_text="Current Quarter Amount"
)
employer_id = fields.IntegerField(
db_column="EMPLOYER_ID",
help_text="Employer identification number."
)
employer_name = fields.CharField(
db_column="EMPLOYER_NAME",
max_length=300,
blank=True,
help_text="Employer Name"
)
INTEREST_CD_CHOICES = (
(40301, 'AGRICULTURE'),
(40302, 'EDUCATION'),
(40303, 'ENTERTAINMENT/RECREATION'),
(40304, 'FINANCE/INSURANCE'),
(40305, 'GOVERNMENT'),
(40306, 'HEALTH'),
(40307, 'LABOR UNIONS'),
(40308, 'LEGAL'),
(40309, 'LODGING/RESTAURANTS'),
(40310, 'MANUFACTURING/INDUSTRIAL'),
(40311, 'MERCHANDISE/RETAIL'),
(40312, 'MISCELLANEOUS'),
(40313, 'OIL AND GAS'),
(40314, 'POLITICAL ORGANIZATIONS'),
(40315, 'PROFESSIONAL/TRADE'),
(40316, 'PUBLIC EMPLOYEES'),
(40317, 'REAL ESTATE'),
(40318, 'TRANSPORTATION'),
(40319, 'UTILITIES'),
)
interest_cd = fields.IntegerField(
db_column="INTEREST_CD",
choices=INTEREST_CD_CHOICES,
verbose_name='interest code',
help_text='Interest Code',
documentcloud_pages=[
DocumentCloud(id='2774529-Lookup-Codes-Cd', start_page=19),
],
)
interest_name = fields.CharField(
db_column="INTEREST_NAME",
max_length=300,
blank=True,
verbose_name="Interest name.",
help_text="Interest name.",
)
qtr_1 = fields.IntegerField(
db_column="QTR_1",
verbose_name="quarter 1 amount",
help_text="Quarter 1 total amount.",
)
qtr_2 = fields.IntegerField(
db_column="QTR_2",
verbose_name="quarter 2 amount.",
help_text="Quarter 2 total amount.",
)
qtr_3 = fields.IntegerField(
db_column="QTR_3",
verbose_name="quarter 3 amount",
help_text="Quarter 3 total amount.",
)
qtr_4 = fields.IntegerField(
db_column="QTR_4",
verbose_name="quarter 4 amount",
help_text="Quarter 4 total amount.",
)
qtr_5 = fields.IntegerField(
db_column="QTR_5",
verbose_name="quarter 5 amount5",
help_text="Quarter 5 total amount.",
)
qtr_6 = fields.IntegerField(
db_column="QTR_6",
verbose_name="quarter 6 amount.",
help_text="Quarter 6 total amount.",
)
qtr_7 = fields.IntegerField(
db_column="QTR_7",
verbose_name="quarter 7 amount.",
help_text="Quarter 7 total amount.",
)
qtr_8 = fields.IntegerField(
db_column="QTR_8",
verbose_name="quarter 8 amount.",
help_text="Quarter 8 total amount.",
)
session_id = fields.IntegerField(
db_column="SESSION_ID",
verbose_name="session identification number.",
help_text="Session identification number.",
)
session_total_amt = fields.IntegerField(
db_column="SESSION_TOTAL_AMT",
verbose_name="session total amount",
help_text="Total amount for the session.",
)
session_yr_1 = fields.IntegerField(
db_column="SESSION_YR_1",
verbose_name="session year 1",
help_text="Total amount for year 1 of the session.",
)
session_yr_2 = fields.IntegerField(
db_column="SESSION_YR_2",
verbose_name="session year 2",
help_text="Total amount for year 2 of the session.",
)
yr_1_ytd_amt = fields.IntegerField(
db_column="YR_1_YTD_AMT",
help_text="Year 1 year to date amount.",
)
yr_2_ytd_amt = fields.IntegerField(
db_column="YR_2_YTD_AMT",
help_text="Year 2 year to date amount.",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_EMPLOYER_HISTORY_CD'
def __str__(self):
return str(self.contributor_id)
@python_2_unicode_compatible
class LobbyistFirm1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
    values are 2002.
"""
UNIQUE_KEY = False
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=103, end_page=104),
]
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM1_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirm2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM3_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
    values are 2002.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=104),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM2_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirm3Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM1_CD and LOBBYIST_FIRM2_CD.
All three tables are documented in "Cal-Access Tables, Columns, Indexes", but
with this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, the distinct SESSION_YR_1 values are 2001, and the distinct SESSION_YR_2
    values are 2002.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=105),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
current_qtr_amt = fields.FloatField(
db_column='CURRENT_QTR_AMT',
help_text="Current quarter amount"
)
session_total_amt = fields.FloatField(
db_column='SESSION_TOTAL_AMT',
help_text="Total amount for the session"
)
contributor_id = fields.IntegerField(
db_column='CONTRIBUTOR_ID',
blank=True,
null=True,
verbose_name="contributor ID",
help_text="Contributor identification number"
)
session_yr_1 = fields.IntegerField(
db_column='SESSION_YR_1',
help_text="Total amount of year 1 of the session",
)
session_yr_2 = fields.IntegerField(
db_column='SESSION_YR_2',
help_text="Total amount of year 2 of the session",
)
yr_1_ytd_amt = fields.FloatField(
db_column='YR_1_YTD_AMT',
verbose_name="Year 1 year-to-date-amount",
help_text="Year 1 year-to-date-amount",
)
yr_2_ytd_amt = fields.FloatField(
db_column='YR_2_YTD_AMT',
verbose_name="Year 2 year-to-date-amount",
help_text="Year 2 year-to-date-amount",
)
qtr_1 = fields.FloatField(
db_column='QTR_1',
verbose_name="Quarter 1",
help_text="Quarter total amount",
)
qtr_2 = fields.FloatField(
db_column='QTR_2',
verbose_name="Quarter 2",
help_text="Quarter total amount",
)
qtr_3 = fields.FloatField(
db_column='QTR_3',
verbose_name="Quarter 3",
help_text="Quarter total amount",
)
qtr_4 = fields.FloatField(
db_column='QTR_4',
verbose_name="Quarter 4",
help_text="Quarter total amount",
)
qtr_5 = fields.FloatField(
db_column='QTR_5',
verbose_name="Quarter 5",
help_text="Quarter total amount",
)
qtr_6 = fields.FloatField(
db_column='QTR_6',
verbose_name="Quarter 6",
help_text="Quarter total amount",
)
qtr_7 = fields.FloatField(
db_column='QTR_7',
verbose_name="Quarter 7",
help_text="Quarter total amount",
)
qtr_8 = fields.FloatField(
db_column='QTR_8',
verbose_name="Quarter 8",
help_text="Quarter total amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM3_CD'
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirmEmployer1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=100),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
filing_sequence = fields.IntegerField(
db_column='FILING_SEQUENCE',
help_text="Amendment number. 0 is the original filing. \
1 to 999 are amendments"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
rpt_start = fields.DateField(
db_column='RPT_START',
null=True,
help_text="Starting date for the period the report covers"
)
rpt_end = fields.DateField(
db_column='RPT_END',
null=True,
help_text="Ending date for the period the report covers"
)
per_total = fields.FloatField(
db_column='PER_TOTAL',
help_text="Total this reporting period"
)
cum_total = fields.FloatField(
db_column='CUM_TOTAL',
help_text='Cumulative total to date'
)
lby_actvty = fields.CharField(
db_column='LBY_ACTVTY',
max_length=182,
blank=True,
help_text="Description of lobbying activity"
)
ext_lby_actvty = fields.CharField(
db_column='EXT_LBY_ACTVTY',
max_length=32,
blank=True,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_EMPLOYER1_CD'
ordering = ("-rpt_start",)
def __str__(self):
return str(self.firm_id)
@python_2_unicode_compatible
class LobbyistFirmEmployer2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_EMPLOYER1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also RPT_START and RPT_END each contain only one distinct value, "2001-04-01"
and "2001-06-30", respectively.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=11, end_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=100, end_page=101),
]
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identification number"
)
filing_sequence = fields.IntegerField(
db_column='FILING_SEQUENCE',
help_text="Amendment number. 0 is the original filing. \
1 to 999 are amendments"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
employer_name = fields.CharField(
db_column='EMPLOYER_NAME',
max_length=300,
help_text="Employer name"
)
rpt_start = fields.DateField(
db_column='RPT_START',
null=True,
help_text="Starting date for the period the report covers"
)
rpt_end = fields.DateField(
db_column='RPT_END',
null=True,
help_text="Ending date for the period the report covers"
)
per_total = fields.FloatField(
db_column='PER_TOTAL',
help_text="Total this reporting period"
)
cum_total = fields.FloatField(
db_column='CUM_TOTAL',
help_text='Cumulative total to date'
)
lby_actvty = fields.CharField(
db_column='LBY_ACTVTY',
max_length=182,
blank=True,
help_text="Description of lobbying activity"
)
ext_lby_actvty = fields.CharField(
db_column='EXT_LBY_ACTVTY',
max_length=32,
blank=True,
help_text="This field is undocumented"
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_EMPLOYER2_CD'
ordering = ("-rpt_start",)
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class LobbyistFirmHistoryCd(CalAccessBaseModel):
"""
Undocumented.
An empty file of the same name is included in the Secretary of State's daily CAL-ACCESS database exports.
This table is documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation
on these tables. Cox 5/11/2000".
Also, the columns on this table are identical to the columns on the
LOBBYIST_FIRM1_CD, LOBBYIST_FIRM2_CD and LOBBYIST_FIRM3_CD tables.
"""
    UNIQUE_KEY = ()
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=101, end_page=102),
]
contributor_id = fields.IntegerField(
db_column="CONTRIBUTOR_ID",
help_text="Contributor identification number."
)
current_qtr_amt = fields.IntegerField(
db_column="CURRENT_QTR_AMT",
help_text="Current Quarter Amount"
)
firm_id = fields.IntegerField(
db_column="FIRM_ID",
help_text="Identification number of the Firm/Employer/Coalition."
)
firm_name = fields.CharField(
db_column="FIRM_NAME",
max_length=300,
help_text="Name of Firm/Employer/Coalition"
)
qtr_1 = fields.IntegerField(
db_column="QTR_1",
help_text="Quarter total amount."
)
qtr_2 = fields.IntegerField(
db_column="QTR_2",
help_text="Quarter total amount."
)
qtr_3 = fields.IntegerField(
db_column="QTR_3",
help_text="Quarter total amount."
)
qtr_4 = fields.IntegerField(
db_column="QTR_4",
help_text="Quarter total amount."
)
qtr_5 = fields.IntegerField(
db_column="QTR_5",
help_text="Quarter total amount."
)
qtr_6 = fields.IntegerField(
db_column="QTR_6",
help_text="Quarter total amount."
)
qtr_7 = fields.IntegerField(
db_column="QTR_7",
help_text="Quarter total amount."
)
qtr_8 = fields.IntegerField(
db_column="QTR_8",
help_text="Quarter total amount."
)
session_id = fields.IntegerField(
db_column="SESSION_ID",
help_text="Session identification number."
)
session_total_amt = fields.IntegerField(
db_column="SESSION_TOTAL_AMT",
help_text="Total amount for the session."
)
session_yr_1 = fields.IntegerField(
db_column="SESSION_YR_1",
help_text="Total amount for year 1 of the session."
)
session_yr_2 = fields.IntegerField(
db_column="SESSION_YR_2",
help_text="Total amount for year 2 of the session."
)
yr_1_ytd_amt = fields.IntegerField(
db_column="YR_1_YTD_AMT",
verbose_name="Year 1 year to date amount.",
help_text="Year 1 year to date amount.",
)
yr_2_ytd_amt = fields.IntegerField(
db_column="YR_2_YTD_AMT",
verbose_name="Year 2 year to date amount",
help_text="Year 2 year to date amount",
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_HISTORY_CD'
def __str__(self):
return str(self.contributor_id)
@python_2_unicode_compatible
class LobbyistFirmLobbyist1Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST2_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=102),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=15,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_LOBBYIST1_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class LobbyistFirmLobbyist2Cd(CalAccessBaseModel):
"""
Deprecated table for the disclosure of lobbyist relationships.
This table is identical to LOBBYIST_FIRM_LOBBYIST1_CD.
Both tables are documented in "Cal-Access Tables, Columns, Indexes", but with
this cryptic note: "Matt needs to describe the relationship between the
multiple tables. Documentation should be cloned from D H's documentation on
these tables. Cox 5/11/2000".
Also, all rows have the same SESSION_ID value: 2001.
"""
UNIQUE_KEY = False
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=12),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=102, end_page=103),
]
lobbyist_id = fields.IntegerField(
db_column='LOBBYIST_ID',
verbose_name="Lobbyist ID",
help_text="Lobbyist identification number"
)
firm_id = fields.IntegerField(
db_column='FIRM_ID',
verbose_name="Firm ID",
help_text="Identification number of the firm, employer or coalition"
)
lobbyist_last_name = fields.CharField(
db_column='LOBBYIST_LAST_NAME',
max_length=15,
help_text="Lobbyist last name"
)
lobbyist_first_name = fields.CharField(
db_column='LOBBYIST_FIRST_NAME',
max_length=17,
help_text="Lobbyist first name"
)
firm_name = fields.CharField(
db_column='FIRM_NAME',
max_length=400,
help_text="Name of firm, employer or coalition",
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'LOBBYIST_FIRM_LOBBYIST2_CD'
ordering = ("-session_id",)
def __str__(self):
return str(self.lobbyist_id)
@python_2_unicode_compatible
class EfsFilingLogCd(CalAccessBaseModel):
"""
Logs from the Electronic Filing Subsystem, which accepts and validates electronic filings.
"""
UNIQUE_KEY = (
"FILING_DATE",
"VENDOR"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711624-Overview', start_page=1, end_page=2),
DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=49, end_page=50),
]
FILING_FORMS = [
annotations.get_form('F400'),
annotations.get_form('F401'),
annotations.get_form('F402'),
annotations.get_form('F410'),
annotations.get_form('F425'),
annotations.get_form('F450'),
annotations.get_form('F460'),
annotations.get_form('F461'),
annotations.get_form('F465'),
annotations.get_form('F496'),
annotations.get_form('F497'),
annotations.get_form('F498'),
annotations.get_form('F601'),
annotations.get_form('F602'),
annotations.get_form('F603'),
annotations.get_form('F604'),
annotations.get_form('F606'),
annotations.get_form('F607'),
annotations.get_form('F615'),
annotations.get_form('F625'),
annotations.get_form('F635'),
annotations.get_form('F645'),
]
filing_date = fields.DateField(
db_column='FILING_DATE',
null=True,
help_text="Date of filing"
)
filingstatus = fields.IntegerField(
db_column='FILINGSTATUS',
help_text="Status of filing. This field is described in the docs as being\
VARCHAR. However, its distinct values are 0, 1, 2 and 7.",
)
vendor = fields.CharField(
db_column='VENDOR',
max_length=250,
help_text="Software vendor who submitted the electronic filing"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=250,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
FORM_TYPE_CHOICES = tuple([(f.db_value, f.full_title) for f in FILING_FORMS]) + (
('BADFORMAT 253', 'Unknown'),
('form', 'Unknown'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=250,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES,
verbose_name="form type",
documentcloud_pages=[
DocumentCloud(id='2711624-Overview', start_page=4, end_page=8),
]
)
error_no = fields.CharField(
db_column='ERROR_NO',
max_length=250,
        help_text='Most records have a value of "ACCEPTED". Other records include "ERROR" '
                  'or "BADFORMAT" and a three-digit number.',
)
class Meta(CalAccessBaseModel.Meta):
"""
Meta model options.
"""
db_table = 'EFS_FILING_LOG_CD'
ordering = ("-filing_date",)
def __str__(self):
return "{} ({})".format(self.vendor, self.filing_date)
| 31.938324 | 109 | 0.642508 | [
"MIT"
] | rkiddy/django-calaccess-raw-data | calaccess_raw/models/inactive.py | 75,087 | Python |
from rdflib import Graph
import requests
import ipaddress
import json
import socket
from urllib.parse import urlparse
from .base import BaseLDN
class Sender(BaseLDN):
def __init__(self, **kwargs):
super(self.__class__, self).__init__(**kwargs)
self.allow_localhost = kwargs.get('allow_localhost', False)
def __accept_post_options(self, inbox, **kwargs):
r = requests.options(inbox, **kwargs)
if r.status_code == requests.codes.ok and 'accept-post' in r.headers:
if self.JSON_LD in r.headers['accept-post']:
return self.JSON_LD
for content_type in r.headers['accept-post'].split(','):
return self.content_type_to_mime_type(content_type)
def __is_localhost(self, inbox):
return ipaddress.ip_address(socket.gethostbyname(
urlparse(inbox).hostname)).is_loopback
def __post_message(self, inbox, data, content_type, **kwargs):
if self.allow_localhost or not self.__is_localhost(inbox):
headers = kwargs.pop("headers", dict())
headers['content-type'] = content_type
r = requests.post(inbox, data=data, headers=headers, **kwargs)
r.raise_for_status()
else:
raise ValueError("Invalid local inbox.")
def send(self, inbox, data, **kwargs):
"""Send the provided data to an inbox."""
if isinstance(data, dict) or isinstance(data, list):
self.__post_message(inbox, json.dumps(data), self.JSON_LD,
**kwargs)
elif isinstance(data, str):
self.__post_message(inbox, data, self.JSON_LD, **kwargs)
elif isinstance(data, Graph):
ct = self.__accept_post_options(inbox, **kwargs) or self.JSON_LD
self.__post_message(inbox, data.serialize(format=ct), ct,
**kwargs)
else:
raise TypeError(
"You cannot send data of type {}.".format(type(data)))
| 37.351852 | 77 | 0.618741 | [
"Apache-2.0"
] | trellis-ldp/py-ldn | ldnlib/sender.py | 2,017 | Python |
from datetime import datetime
from decimal import Decimal
from logging import Logger
from typing import Callable
import pytest
from faker import Faker
from passport.domain import User
from wallet.core.entities import Account, Category, Operation, OperationPayload, OperationType
from wallet.core.services.operations import OperationService
from wallet.core.storage import Storage
StorageBuilder = Callable[[Account, Category], Storage]
@pytest.fixture(scope="function")
def prepare_storage(fake_storage: Storage, fake_coroutine) -> StorageBuilder:
def builder(account: Account, category: Category) -> Storage:
fake_storage.accounts.fetch_by_key = fake_coroutine(account)
fake_storage.categories.fetch_by_key = fake_coroutine(category)
fake_storage.operations.save = fake_coroutine(1)
return fake_storage
return builder
PayloadBuilder = Callable[[Account, Category, datetime], OperationPayload]
@pytest.fixture(scope="function")
def payload_builder(user: User):
def builder(account: Account, category: Category, created_on: datetime) -> OperationPayload:
return OperationPayload(
user=user,
amount=Decimal("199.90"),
account=account.key,
category=category.key,
operation_type=OperationType.expense,
created_on=created_on,
)
return builder
@pytest.mark.unit
async def test_success(
faker: Faker,
prepare_storage: StorageBuilder,
logger: Logger,
payload_builder: PayloadBuilder,
user: User,
account: Account,
category: Category,
) -> None:
created_on = faker.date_time_between()
service = OperationService(prepare_storage(account, category), logger)
operation = await service.add(payload=payload_builder(account, category, created_on))
expected = Operation(
amount=Decimal("199.90"),
description="",
user=user,
account=account,
category=category,
operation_type=OperationType.expense,
)
expected.key = 1
expected.created_on = created_on
assert operation == expected
| 28.333333 | 96 | 0.718118 | [
"MIT"
] | clayman-micro/wallet | tests/core/services/operations/test_add.py | 2,125 | Python |
from backtest.indicators.StochRSI import StochRSI
import backtrader as bt
from backtest.indicators.ConnorsRSI import ConnorsRSI
class CRSI(bt.Strategy):
params = (("ratio", 0.2),)
def __init__(self) -> None:
super().__init__()
self.rsi = ConnorsRSI(self.data)
def next(self):
position = self.getposition()
rsi = self.rsi[0]
if not position and rsi >= 60:
self.order_target_percent(target=self.p.ratio)
if position and rsi <= 30:
self.close()
class SRSI(bt.Strategy):
params = (("ratio", 0.95),)
def __init__(self) -> None:
super().__init__()
self.rsi = StochRSI(self.data, period=30)
self.crossover = bt.indicators.CrossOver(self.rsi, bt.LineNum(0.7))
self.crossdown = bt.indicators.CrossDown(self.rsi, bt.LineNum(0.3))
def next(self):
position = self.getposition()
if not position and self.crossover[0]:
self.order_target_percent(target=self.p.ratio)
if position and self.crossdown[0]:
self.close()
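# Illustrative wiring sketch -- not part of this module. It shows how one of the
# strategies could be plugged into a backtrader Cerebro run; the pandas data feed
# and cash amount are assumptions, any backtrader data feed works.
#
#   import backtrader as bt
#   cerebro = bt.Cerebro()
#   cerebro.addstrategy(SRSI, ratio=0.95)
#   cerebro.adddata(bt.feeds.PandasData(dataname=ohlcv_df))
#   cerebro.broker.setcash(10000.0)
#   cerebro.run()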
| 25.928571 | 75 | 0.618916 | [
"MIT"
] | leejh3224/bitrush | backtest/strategies/RSI.py | 1,089 | Python |
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session | 42.030675 | 76 | 0.619618 | [
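# Illustrative usage sketch -- not part of this module. Integration classes are
# expected to wrap an open socket in a TLSConnection and then call _handshake();
# the SRP credentials, fingerprint value and the tlslite.api.TLSConnection import
# are assumptions.
#
#   helper = ClientHelper(username="alice", password="secret",
#                         x509Fingerprint="93f0ab12...")
#   connection = TLSConnection(sock)
#   helper._handshake(connection)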
"BSD-3-Clause"
] | 1065672644894730302/Chromium | third_party/tlslite/tlslite/integration/ClientHelper.py | 6,851 | Python |
""":mod:`kinsumer.version` --- Version information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
VERSION_INFO = (0, 5, 3)
VERSION = '{}.{}.{}'.format(*VERSION_INFO)
if __name__ == '__main__':
print(VERSION)
| 22.2 | 50 | 0.477477 | [
"MIT"
] | balancehero/kinsumer | kinsumer/version.py | 222 | Python |
"""
SYNOPSIS
--------
Get the details of unused resources present across regions in the AWS account
DESCRIPTION
-----------
This script provides a detailed overview of the number of unused resources present in the AWS account.
It provides service-wise details of unused resources lying around in all the regions of the AWS account.
PREREQUISITES
-------------
- Workstation with Python version 3 and above
- AWS python-based SDK: boto3
Installation command: pip3 install boto3
- pandas framework and openpyxl for reporting operations (xlsx file).
Installation command(s):
- pip3 install pandas
- pip3 install openpyxl
- User credentials (Access Key Id and Secret Access Key) of a user having at least the Security Audit permission and above on the AWS account
EXAMPLE
-------
This script can be executed with any Python 3 interpreter (AWS CloudShell, PowerShell, bash, or any other command line tool with Python installed)
Command: python ./unused_aws_resources.py --accessKey <AWS Access Key Id> --secretKey <AWS Secret Access Key>
OUTPUT
------
- The script will provide a summarized count of all unused resources in the account.
- For a detailed view, the user can refer to the .xlsx file that will be generated by the script.
"""
import json
import boto3
import argparse
import multiprocessing
import csv
import os
import pandas as pd
import sys
import glob
from urllib.request import urlopen
def ebs_volume(function, credentials, unused_resource_count, region_list):
print('Scanning EBS Volumes')
volume_count = 0
unused_volume_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
volumes = list(ec2.volumes.all())
unused_volumes = set([volume.volume_id for volume in volumes if volume.state == 'available'])
for volume_id in unused_volumes:
unused_volume_detail.append({'ResourceType':'AWS::EC2::Volume','ResourceId':volume_id,'Region':region})
volume_count+=len(unused_volumes)
except:
pass
if volume_count:
unused_volume_detail = json.loads(json.dumps(unused_volume_detail))
f = csv.writer(open("./aws_logs/ebs_volume.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_volume_detail in unused_volume_detail:
f.writerow([unused_volume_detail["ResourceType"],
unused_volume_detail["ResourceId"],
unused_volume_detail["Region"]])
unused_resource_count[function] = volume_count
def elastic_ip(function, credentials, unused_resource_count, region_list):
print('Scanning Elastic IPs')
eip_count = 0
unused_eip_detail = []
for region in region_list:
try:
ec2_client = boto3.client('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
eip_data = ec2_client.describe_addresses()['Addresses']
for eip in eip_data:
try:
AssociationId = eip['AssociationId']
except:
AssociationId = ''
if not AssociationId:
unused_eip_detail.append({'ResourceType':'AWS::EC2::EIP','ResourceId':eip['AllocationId'],'Region':region})
eip_count += 1
except:
pass
if eip_count:
unused_eip_detail = json.loads(json.dumps(unused_eip_detail))
f = csv.writer(open("./aws_logs/elastic_ip.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_eip_detail in unused_eip_detail:
f.writerow([unused_eip_detail["ResourceType"],
unused_eip_detail["ResourceId"],
unused_eip_detail["Region"]])
unused_resource_count[function] = eip_count
def network_interface(function, credentials, unused_resource_count, region_list):
print('Scanning Network Interfaces')
ni_count = 0
unused_ni_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
network_interfaces = list(ec2.network_interfaces.all())
unused_nis = set([ni.network_interface_id for ni in network_interfaces if ni.status == 'available'])
for network_interface_id in unused_nis:
unused_ni_detail.append({'ResourceType':'AWS::EC2::NetworkInterface','ResourceId':network_interface_id,'Region':region})
ni_count+=len(unused_nis)
except:
pass
if ni_count:
unused_ni_detail = json.loads(json.dumps(unused_ni_detail))
f = csv.writer(open("./aws_logs/network_interface.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_ni_detail in unused_ni_detail:
f.writerow([unused_ni_detail["ResourceType"],
unused_ni_detail["ResourceId"],
unused_ni_detail["Region"]])
unused_resource_count[function] = ni_count
def vpc(function, credentials, unused_resource_count, region_list):
print('Scanning VPCs')
vpc_count = 0
unused_vpc_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
vpcs = list(ec2.vpcs.all())
network_interfaces = list(ec2.network_interfaces.all())
all_vpcs = set([vpc.vpc_id for vpc in vpcs])
            # vpc_id is a plain attribute on each NetworkInterface resource
            all_active_vpcs = set([ni.vpc_id for ni in network_interfaces if ni.vpc_id])
unused_vpcs = all_vpcs - all_active_vpcs
for vpcid in unused_vpcs:
unused_vpc_detail.append({'ResourceType':'AWS::EC2::VPC','ResourceId':vpcid,'Region':region})
vpc_count+=len(unused_vpcs)
except:
pass
if vpc_count:
unused_vpc_detail = json.loads(json.dumps(unused_vpc_detail))
f = csv.writer(open("./aws_logs/vpc.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_vpc_detail in unused_vpc_detail:
f.writerow([unused_vpc_detail["ResourceType"],
unused_vpc_detail["ResourceId"],
unused_vpc_detail["Region"]])
unused_resource_count[function] = vpc_count
def subnet(function, credentials, unused_resource_count, region_list):
print('Scanning Subnets')
subnet_count = 0
unused_subnet_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
subnets = list(ec2.subnets.all())
network_interfaces = list(ec2.network_interfaces.all())
all_subnets = set([subnet.subnet_id for subnet in subnets])
            # subnet_id is a plain attribute on each NetworkInterface resource
            all_active_subnets = set([ni.subnet_id for ni in network_interfaces if ni.subnet_id])
unused_subnets = all_subnets - all_active_subnets
for subnetid in unused_subnets:
unused_subnet_detail.append({'ResourceType':'AWS::EC2::Subnet','ResourceId':subnetid,'Region':region})
subnet_count+=len(unused_subnets)
except:
pass
if subnet_count:
unused_subnet_detail = json.loads(json.dumps(unused_subnet_detail))
f = csv.writer(open("./aws_logs/subnet.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_subnet_detail in unused_subnet_detail:
f.writerow([unused_subnet_detail["ResourceType"],
unused_subnet_detail["ResourceId"],
unused_subnet_detail["Region"]])
unused_resource_count[function] = subnet_count
def security_group(function, credentials, unused_resource_count, region_list):
print('Scanning Security Groups')
sg_count = 0
unused_sg_detail = []
for region in region_list:
try:
ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
sgs = list(ec2.security_groups.all())
network_interfaces = list(ec2.network_interfaces.all())
all_sgs = set([sg.group_id for sg in sgs])
all_inst_sgs = set([sg['GroupId'] for ni in network_interfaces for sg in ni.groups])
unused_sgs = all_sgs - all_inst_sgs
for sgid in unused_sgs:
unused_sg_detail.append({'ResourceType':'AWS::EC2::SecurityGroup','ResourceId':sgid,'Region':region})
sg_count+=len(unused_sgs)
except:
pass
if sg_count:
unused_sg_detail = json.loads(json.dumps(unused_sg_detail))
f = csv.writer(open("./aws_logs/security_group.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_sg_detail in unused_sg_detail:
f.writerow([unused_sg_detail["ResourceType"],
unused_sg_detail["ResourceId"],
unused_sg_detail["Region"]])
unused_resource_count[function] = sg_count
def classic_loadbalancer(function, credentials, unused_resource_count, region_list):
print('Scanning Classic Load balancers')
elb_count = 0
unused_elb_detail = []
for region in region_list:
try:
classic_lb = boto3.client('elb', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
paginated_data=[]
elb_paginator = classic_lb.get_paginator('describe_load_balancers')
for load_balancers in elb_paginator.paginate():
paginated_data.extend(load_balancers['LoadBalancerDescriptions'])
for elb_detail in paginated_data:
instance_health_status = []
instance_data = classic_lb.describe_instance_health(LoadBalancerName=elb_detail['LoadBalancerName'])['InstanceStates']
for instance in instance_data:
instance_health_status.append(instance['State'])
if 'InService' not in instance_health_status:
unused_elb_detail.append({'ResourceType':'AWS::ElasticLoadBalancing::LoadBalancer','ResourceId':elb_detail['LoadBalancerName'],'Region':region})
elb_count+=1
except:
pass
if elb_count:
unused_elb_detail = json.loads(json.dumps(unused_elb_detail))
f = csv.writer(open("./aws_logs/classic_loadbalancer.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_elb_detail in unused_elb_detail:
f.writerow([unused_elb_detail["ResourceType"],
unused_elb_detail["ResourceId"],
unused_elb_detail["Region"]])
unused_resource_count[function] = elb_count
def app_nw_gateway_loadbalancer(function, credentials, unused_resource_count, region_list):
print('Scanning Application/Network/Gateway Load balancers')
elbv2_count = 0
unused_elbv2_detail = []
for region in region_list:
try:
elbv2 = boto3.client('elbv2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region)
paginated_data=[]
elbv2_paginator = elbv2.get_paginator('describe_load_balancers')
for load_balancers in elbv2_paginator.paginate():
paginated_data.extend(load_balancers['LoadBalancers'])
for elbv2_detail in paginated_data:
target_health_status = []
try:
target_group_detail = elbv2.describe_target_groups(LoadBalancerArn=elbv2_detail['LoadBalancerArn'])['TargetGroups']
for target_group in target_group_detail:
target_group_health = elbv2.describe_target_health(TargetGroupArn=target_group['TargetGroupArn'])['TargetHealthDescriptions']
for target in target_group_health:
target_health_status.append(target['TargetHealth']['State'])
except:
pass
if 'healthy' not in target_health_status:
unused_elbv2_detail.append({'ResourceType':'AWS::ElasticLoadBalancingV2::LoadBalancer', 'LoadBalancer_Type':elbv2_detail['Type'], 'ResourceId':elbv2_detail['LoadBalancerName'],'Region':region})
elbv2_count+=1
except:
pass
if elbv2_count:
unused_elbv2_detail = json.loads(json.dumps(unused_elbv2_detail))
f = csv.writer(open("./aws_logs/app_nw_gateway_loadbalancer.csv", "w", newline=''))
f.writerow(["ResourceType", "LoadBalancer_Type", "ResourceId", "Region"])
for unused_elbv2_detail in unused_elbv2_detail:
f.writerow([unused_elbv2_detail["ResourceType"],
unused_elbv2_detail["LoadBalancer_Type"],
unused_elbv2_detail["ResourceId"],
unused_elbv2_detail["Region"]])
unused_resource_count[function] = elbv2_count
def iam_user(function, credentials, unused_resource_count, region_list):
print('Scanning IAM Users')
iamuser_count = 0
unused_iamuser_detail = []
try:
iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iamuser_data = list(iam.users.all())
for user in iamuser_data:
if not user.password_last_used and not iam_client.list_access_keys(UserName=user.name)['AccessKeyMetadata']:
unused_iamuser_detail.append({'ResourceType':'AWS::IAM::User', 'ResourceId': user.name, 'Region':'Global'})
iamuser_count += 1
except:
pass
if iamuser_count:
unused_iamuser_detail = json.loads(json.dumps(unused_iamuser_detail))
f = csv.writer(open("./aws_logs/iam_user.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_iamuser_detail in unused_iamuser_detail:
f.writerow([unused_iamuser_detail["ResourceType"],
unused_iamuser_detail["ResourceId"],
unused_iamuser_detail["Region"]])
unused_resource_count[function] = iamuser_count
def iam_group(function, credentials, unused_resource_count, region_list):
print('Scanning IAM Groups')
iamgroup_count = 0
unused_iamgroup_detail = []
try:
iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'])
iamgroup_data = list(iam.groups.all())
for group in iamgroup_data:
if not iam_client.get_group(GroupName=group.name)['Users']:
unused_iamgroup_detail.append({'ResourceType':'AWS::IAM::Group', 'ResourceId': group.name, 'Region':'Global'})
iamgroup_count += 1
except:
pass
if iamgroup_count:
unused_iamgroup_detail = json.loads(json.dumps(unused_iamgroup_detail))
f = csv.writer(open("./aws_logs/iam_group.csv", "w", newline=''))
f.writerow(["ResourceType", "ResourceId", "Region"])
for unused_iamgroup_detail in unused_iamgroup_detail:
f.writerow([unused_iamgroup_detail["ResourceType"],
unused_iamgroup_detail["ResourceId"],
unused_iamgroup_detail["Region"]])
unused_resource_count[function] = iamgroup_count
def main(arg):
access_key = arg.accessKey
secret_key = arg.secretKey
region_list = []
unused_resource_details = {}
try:
print("Connecting to AWS account ")
session = boto3.session.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
except:
print("\033[1;31;40m ""Please do Check for Credentials provided or Internet Connection and Try Again\n")
quit()
iam = session.client('sts')
account_id = iam.get_caller_identity()["Account"]
print("Successfully connected to AWS account", account_id)
print("Scanning for unused resources across all available regions.")
print("Wait for few minutes...\n")
function_list= [ ebs_volume, elastic_ip, network_interface, vpc, subnet, security_group, classic_loadbalancer, app_nw_gateway_loadbalancer,
iam_user, iam_group ]
print("Collecting list of enabled region")
available_regions = session.client('ec2',region_name="us-east-1")
enabled_regions = available_regions.describe_regions()['Regions']
for region in enabled_regions:
region_list.append(region['RegionName'])
manager = multiprocessing.Manager()
unused_resource_count = manager.dict()
credentials = manager.dict()
credentials['access_key'] = access_key
credentials['secret_key'] = secret_key
credentials['account_id'] = account_id
jobs = []
try:
os.mkdir("./aws_logs")
except:
pass
for function in function_list:
try:
p = multiprocessing.Process(target=function, args=(function, credentials, unused_resource_count, region_list))
jobs.append(p)
p.start()
except:
print("Exception occurred while creating processes. Please try again later!")
quit()
if jobs:
for process in jobs:
try:
process.join()
except:
print("Exception occurred while joining processes. Please try again later!")
quit()
os.chdir('./aws_logs')
writer = pd.ExcelWriter('unused_resources.xlsx')
all_files = glob.glob("*.csv")
for f in all_files:
df = pd.read_csv(f)
df.to_excel(writer,sheet_name=f.split('.')[0], index=False)
writer.save()
for f in all_files:
os.remove(f)
print("Completed account scan")
# Updating Resource Count Object
unused_resource_details.update({ 'AWS::EC2::Volume': unused_resource_count[ebs_volume],
'AWS::EC2::EIP': unused_resource_count[elastic_ip],
'AWS::EC2::NetworkInterface': unused_resource_count[network_interface],
'AWS::EC2::VPC': unused_resource_count[vpc],
'AWS::EC2::Subnet': unused_resource_count[subnet],
'AWS::EC2::SecurityGroup': unused_resource_count[security_group],
'AWS::ElasticLoadBalancing::LoadBalancer': unused_resource_count[classic_loadbalancer],
'AWS::ElasticLoadBalancingV2::LoadBalancer': unused_resource_count[app_nw_gateway_loadbalancer],
'AWS::IAM::User': unused_resource_count[iam_user],
'AWS::IAM::Group': unused_resource_count[iam_group]
})
# Showing Resource Distribution
print("\nUnused Resources in the Account:")
unused_resource_count = 0
for key, value in sorted(unused_resource_details.items(), key=lambda x: x[1], reverse=True):
if value != 0:
print("\t{} : {}".format(key, value))
unused_resource_count+=value
print("\n\nSummary:")
print("\tTotal Unused Resources:", unused_resource_count)
print("\n\nDetailed unused resource information can be found at: aws_logs/unused_resources.xlsx")
if(__name__ == '__main__'):
arg_parser = argparse.ArgumentParser(prog='unused_aws_resources',
usage='%(prog)s [options]',
description='Count AWS resources')
# Add the arguments
arg_parser.add_argument('--accessKey',
type=str,
required=True,
help='AWS Access Key')
arg_parser.add_argument('--secretKey',
type=str,
required=True,
help='AWS Secret Key')
# Execute the parse_args() method
args = arg_parser.parse_args()
main(args)
| 42.795276 | 214 | 0.620883 | [
"MIT"
] | ankitsrao/aws-unused-resources | unused_aws_resources.py | 21,740 | Python |
import os
from ....modules.utils.config_utils import get_yaml_config
str2yaml = {
"gat": "gat.yaml",
"gcn": "gcn.yaml",
"ggnn": "ggnn.yaml",
"graphsage": "graphsage.yaml",
}
dir_path = os.path.dirname(os.path.realpath(__file__))
def get_graph_embedding_args(graph_embedding_name):
"""
    It will build the template for the ``GNNBase`` model.
Parameters
----------
graph_embedding_name: str
The graph embedding name. Expected in ["gcn", "gat", "graphsage", "ggnn"].
If it can't find the ``graph_embedding_name``, it will return ``{}``.
Returns
-------
template_dict: dict
The template dict.
The structure is shown as follows:
{
graph_embedding_share: {num_layers: 1, input_size: 300, ...},
graph_embedding_private: {heads: [1], attn_drop: 0.0}
}
The ``graph_embedding_share`` contains the parameters shared by all ``GNNBase`` models.
The ``graph_embedding_private`` contains the parameters specifically in each \
graph_embedding methods.
"""
if graph_embedding_name in str2yaml.keys():
yaml_name = str2yaml[graph_embedding_name]
path = os.path.join(dir_path, yaml_name)
config = get_yaml_config(path)
return config
else:
return {}
__all__ = ["get_graph_embedding_args"]
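# Illustrative usage sketch -- not part of this module; merging the template into a
# larger model config is an assumption about how callers use it.
#
#   gat_template = get_graph_embedding_args("gat")
#   shared = gat_template.get("graph_embedding_share", {})
#   private = gat_template.get("graph_embedding_private", {})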
| 29.847826 | 95 | 0.630736 | [
"Apache-2.0"
] | RyanWangZf/graph4nlp | graph4nlp/pytorch/modules/config/graph_embedding/__init__.py | 1,373 | Python |
import numpy as np
from skimage import io as ios
import PySimpleGUI as sg
import warnings
import m_specfun as m_fun
def select_lines(infile, contrast, lines, res_dict, fits_dict, wloc, outfil):
"""
    displays a new window with the FITS image loaded from infile;
    a rectangle around the selected line can be selected by dragging the mouse
:param infile: filebase of image
:param contrast: brightness of image
:param lines: list of calibration wavelengths
:param res_dict: dictionary
:param fits_dict: "
:param wloc: location of displayed window for selection
:param outfil: filename without extension (.txt) with results of line selection
:return:
x0, y0: center coordinates of selected rectangle (int)
dx, dy: half width and height of selected rectangle (int)
"""
def fitgaussimage(image, xy0, dxy, lam):
x0 = xy0[0]
y0 = xy0[1]
dx = dxy[0]
dy = dxy[1]
print(x0, y0, dx, dy)
data = image[y0 - dy:y0 + dy, x0 - dx:x0 + dx] # x - y reversed
params, success = m_fun.fit_gaussian_2d(data)
if success in [1, 2, 3, 4]:
(height, x, y, width_x, width_y) = params # x and y reversed
width_x = 2 * np.sqrt(2 * np.log(2)) * np.abs(width_x) # FWHM
width_y = 2 * np.sqrt(2 * np.log(2)) * np.abs(width_y) # FWHM
x = x + y0 - dy # y and x reversed
y = y + x0 - dx
xyw = (y, x, width_y, width_x, lam) # x - y changed back
return xyw
else:
return 0, 0, 0, 0, 0
xyl = []
dxy = [10, 10]
i = i_plot = 0
im, header = m_fun.get_fits_image(infile)
if len(im.shape) == 3:
imbw = np.sum(im, axis=2) # used for fitgaussian(data)
else:
imbw = im
# (ymax, xmax) = im.shape
# print (xmax,ymax)
m_fun.get_fits_keys(header, fits_dict, res_dict, keyprint=False)
# #===================================================================
# new rect_plt
# first get size of graph from tmp.png and size of image
# graph coordinates are in image pixels!
(imy, imx) = im.shape[:2]
image_file = 'tmp.png' # scaled image
imrescale = np.flipud(ios.imread(image_file)) # get shape
(canvasy, canvasx) = imrescale.shape[:2]
wlocw = (wloc[0], wloc[1])
image_elem_sel = [sg.Graph(canvas_size=(canvasx, canvasy), graph_bottom_left=(0, 0),
graph_top_right=(imx, imy), key='-GRAPH-', change_submits=True, drag_submits=True)]
layout_select = [[sg.Ok(), sg.Cancel(), sg.Button('Skip Line'), sg.Button('Finish'),
sg.Button('I'), sg.Button('D'), sg.Text(infile, size=(30, 1)),
sg.Text(key='info', size=(40, 1))], image_elem_sel]
winselect = sg.Window(f'select rectangle for fit size, click lines',
layout_select, finalize=True, location=wlocw,
keep_on_top=True, no_titlebar=False, resizable=True,
disable_close=False, disable_minimize=True, element_padding=(2, 2))
# get the graph element for ease of use later
graph = winselect['-GRAPH-'] # type: sg.Graph
# initialize interactive graphics
winselect_active = True
img = graph.draw_image(image_file, location=(0, imy))
dragging = False
start_point = end_point = prior_rect = None
index = 0
icircle = itext = None
color = 'yellow'
while winselect_active:
event, values = winselect.read()
if event == "-GRAPH-": # if there's a "Graph" event, then it's a mouse
x, y = (values["-GRAPH-"])
if not dragging:
start_point = (x, y)
dragging = True
else:
end_point = (x, y)
if prior_rect:
graph.delete_figure(prior_rect)
if None not in (start_point, end_point):
prior_rect = graph.draw_rectangle(start_point,
end_point, line_color='red')
elif event is not None and event.endswith('+UP'):
# The drawing has ended because mouse up
xy0 = [int(0.5 * (start_point[0] + end_point[0])),
int(0.5 * (start_point[1] + end_point[1]))]
size = (abs(start_point[0] - end_point[0]),
abs(start_point[1] - end_point[1]))
info = winselect["info"]
info.update(value=f"grabbed rectangle at {xy0} with size {size}")
start_point, end_point = None, None # enable grabbing a new rect
dragging = False
if min(size[0], size[1]) > 2: # rectangle
info.update(value=f"rectangle at {xy0} with size {size}")
dxy = size
elif i < len(lines):
if prior_rect:
graph.delete_figure(prior_rect)
print(xy0, lines[i])
xyw = (fitgaussimage(imbw, xy0, dxy, lines[i]))
if xyw[0]: # successful fit
if 0 < xyw[0] < imx and 0 < xyw[1] < imy:
print(np.float16(xyw))
xyl.append(np.float32(xyw))
# Draw the click just made
r = (xyw[2] + xyw[3])/4
icircle = graph.DrawCircle((xyw[0], xyw[1]), r, line_color=color, line_width=3)
itext = graph.DrawText(' ' + str(lines[i]), location=(xyw[0], xyw[1]), color=color,
font=('Arial', 12), angle=45, text_location=sg.TEXT_LOCATION_BOTTOM_LEFT)
info.update(value=f"line {lines[i]} at {np.float16(xyw)}")
graph.update()
i += 1
i_plot += 1
else:
info.update(value='bad fit, try again')
print('bad fit, try again')
else:
info.update(value='Fit not successful, try again')
print('Fit not successful, try again')
else:
info.update(value='all lines measured, press OK or Cancel')
elif event == 'Ok':
if np.array(xyl).shape[0] > 1:
# minimum of two lines needed for fit
xyl = np.array(xyl, dtype=np.float32) # for ordered output
with open(m_fun.change_extension(outfil, '.txt'), 'ab+') as f:
np.savetxt(f, xyl, fmt='%8.2f', header=str(index) + ' ' + str(infile) + '.fit')
np.savetxt(f, np.zeros((1, 5)), fmt='%8.2f')
index += 1
color = 'red' if color == 'yellow' else 'yellow' # alternate colors for spectra
elif icircle:
graph.delete_figure(icircle) # last point
graph.delete_figure(itext)
graph.update()
xyl = []
i = i_plot = 0
elif event == 'Cancel':
for ind in range(i_plot):
xyl = np.array(xyl, dtype=np.float32) # for ordered output
rsq2 = (xyl[ind, 2] + xyl[ind, 3])/5.6
drag_figures = graph.get_figures_at_location((xyl[ind, 0] + rsq2, xyl[ind, 1] + rsq2))
for figure in drag_figures:
if figure != img:
graph.delete_figure(figure)
graph.update()
xyl = []
i = i_plot = 0
elif event == 'Skip Line':
i += 1 # do not increment iplot!
elif event in ('I', 'D'):
if event == 'I':
contrast *= 2
else:
contrast /= 2
im_tmp = imrescale / np.max(imrescale) * 255 * contrast
im_tmp = np.clip(im_tmp, 0.0, 255)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ios.imsave(image_file, np.flipud(im_tmp).astype(np.uint8))
graph.delete_figure(img)
img = graph.draw_image(image_file, location=(0, imy))
graph.send_figure_to_back(img)
elif event in ('Finish', None):
if event == 'Finish':
with open(outfil + '.txt', 'ab+') as f:
np.savetxt(f, np.zeros((1, 5)), fmt='%8.2f')
(x, y) = winselect.current_location()
wlocw = (x, y)
winselect.close()
return wlocw
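# Illustrative call sketch -- not part of this module. The calling code elsewhere in
# this project supplies the FITS file base name, calibration wavelengths and the
# dictionaries it already maintains; every literal value below is an assumption.
#
#   window_location = select_lines('m20200801_0123', 1.0,
#                                  [435.8, 546.1, 577.0, 579.1],
#                                  res_dict, fits_dict,
#                                  (100, 100), 'calibration_lines')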
| 45.09375 | 109 | 0.507046 | [
"MIT"
] | meteorspectroscopy/meteor-spectrum-calibration | myselect.py | 8,658 | Python |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Pauli X (bit-flip) gate.
Author: Andrew Cross
"""
from qiskit import QuantumRegister
from qiskit import QuantumCircuit
from qiskit import Gate
from qiskit import CompositeGate
from qiskit import InstructionSet
from qiskit.extensions.standard import header
class XGate(Gate):
"""Pauli X (bit-flip) gate."""
def __init__(self, qubit, circ=None):
"""Create new X gate."""
super(XGate, self).__init__("x", [], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
return self._qasmif("x %s[%d];" % (qubit[0].name, qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.x(self.arg[0]))
def x(self, q):
"""Apply X to q."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.x((q, j)))
return gs
else:
self._check_qubit(q)
return self._attach(XGate(q, self))
QuantumCircuit.x = x
CompositeGate.x = x
| 29.272727 | 80 | 0.610248 | [
"Apache-2.0"
] | NickyBar/QIP | qiskit/extensions/standard/x.py | 1,932 | Python |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ScheduleSecretDeletionDetails(object):
"""
Details for scheduling the deletion of the specified secret.
"""
def __init__(self, **kwargs):
"""
Initializes a new ScheduleSecretDeletionDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param time_of_deletion:
The value to assign to the time_of_deletion property of this ScheduleSecretDeletionDetails.
:type time_of_deletion: datetime
"""
self.swagger_types = {
'time_of_deletion': 'datetime'
}
self.attribute_map = {
'time_of_deletion': 'timeOfDeletion'
}
self._time_of_deletion = None
@property
def time_of_deletion(self):
"""
Gets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:return: The time_of_deletion of this ScheduleSecretDeletionDetails.
:rtype: datetime
"""
return self._time_of_deletion
@time_of_deletion.setter
def time_of_deletion(self, time_of_deletion):
"""
Sets the time_of_deletion of this ScheduleSecretDeletionDetails.
An optional property indicating when to delete the secret version, expressed in `RFC 3339`__ timestamp format.
__ https://tools.ietf.org/html/rfc3339
:param time_of_deletion: The time_of_deletion of this ScheduleSecretDeletionDetails.
:type: datetime
"""
self._time_of_deletion = time_of_deletion
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
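# Illustrative usage sketch -- not part of the generated module. The model is
# typically passed to the Vault service client's schedule_secret_deletion call;
# that call, the datetime import and the secret OCID are assumptions.
#
#   details = ScheduleSecretDeletionDetails()
#   details.time_of_deletion = datetime.datetime(2020, 12, 31, 0, 0, 0)
#   vaults_client.schedule_secret_deletion(secret_id, details)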
| 33.986667 | 245 | 0.696352 | [
"Apache-2.0"
] | revnav/sandbox | darling_ansible/python_venv/lib/python3.7/site-packages/oci/vault/models/schedule_secret_deletion_details.py | 2,549 | Python |
# Generated by Django 2.0.4 on 2018-04-20 09:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('daily_tracker', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='attandance',
name='enter_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='attandance',
name='out_at',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='attandance',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| 28.03125 | 121 | 0.620959 | [
"MIT"
] | tanvir002700/tracker | office_tracker/daily_tracker/migrations/0002_auto_20180420_0946.py | 897 | Python |
import argparse
import pandas as pd
import re
ass15 = 'assist15', 'ASSISTments 2015'
ass17 = 'assist17', 'ASSISTments 2017'
prog19 = 'prog19', 'Programming 2019'
synth_k2 = 'synth-k2', 'Synthetic-K2'
synth_k5 = 'synth-k5', 'Synthetic-K5'
ass09up = 'assist09up', 'ASSISTments 2009 Updated'
stat = 'stat', 'Statics'
intro_prog = 'intro-prog', 'IntroProg'
def sl_dict(a, b):
return {'short': a, 'long': b}
dataset_tups = ass15, ass17, prog19, synth_k2, synth_k5, ass09up, stat, intro_prog
datasets = {**{s: sl_dict(s, l) for s, l in dataset_tups}, **{l: sl_dict(s, l) for s, l in dataset_tups}}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Yay, I\'m a description!',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('kfold_results_filename')
parser.add_argument('--min', action='store_true')
parser.add_argument('--metric',
default='auc',
choices={'acc', 'auc', 'prec', 'recall', 'f1', 'mcc', 'rmse', 'aic', 'aicc', 'bic'})
args = parser.parse_args()
kfold_results = pd.read_csv(args.kfold_results_filename, encoding='latin')
    kfold_results.dataset = kfold_results.dataset.apply(lambda x: datasets[x]['long'] if x in datasets else x)
max_filter_col = f'{args.metric}-sd'
kfold_results[max_filter_col] = kfold_results[args.metric].apply(lambda x: float(re.split(r'[^0-9.]', x)[0]))
kfold_max_results = kfold_results.loc[kfold_results.groupby(['dataset', 'model'])[max_filter_col].idxmax()] \
if not args.min \
else kfold_results.loc[kfold_results.groupby(['dataset', 'model'])[max_filter_col].idxmin()]
best = 'min' if args.metric in ('rmse', 'aic', 'aicc', 'bic') else 'max'
if 'fold-results' in args.kfold_results_filename:
output_filename = args.kfold_results_filename.replace("-results", f"-{best}-{args.metric}-results")
else:
output_filename = f'kfold-results-{best}-{args.metric}-results.csv'
print(f'wrote {output_filename}')
kfold_max_results = kfold_max_results.drop([max_filter_col], axis=1).sort_values(
by=['dataset', args.metric], ascending=False)
kfold_max_results.to_csv(output_filename, index=False)
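# Illustrative invocations -- not part of this script; the input file name is an
# assumption.
#
#   python scripts/extract_best_kfold_results.py kfold-results.csv --metric auc
#   python scripts/extract_best_kfold_results.py kfold-results.csv --metric rmse --min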
| 42.745455 | 113 | 0.67248 | [
"Unlicense"
] | dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation | scripts/extract_best_kfold_results.py | 2,351 | Python |
"""
Copyright (c) 2017, Syslog777
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Desktop nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from scapy.all import *
class Ping:
def __init__(self, parser):
parser.add_argument('-f', "--flood", action="store_true")
parser.add_argument("--src", nargs="?", default="172.217.12.110",
help="Source IP address for IP layer of ICMP packet\n"
"Default source address: google server")
try:
self.args = parser.parse_args()
if not (self.args.ping):
print("Host required!")
parser.print_help()
sys.exit(1)
except BaseException:
parser.print_help()
sys.exit(1)
self.host = self.args.ping
self.src = self.args.src
self.flood = self.args.flood
def ping(self):
network_layer = IP(src=self.src, dst=self.host)
packet = network_layer / ICMP(code=8)
print("Ping host at {} from {}".format(self.host, self.src))
send(packet)
def flood_(self):
print("\n###########################################")
print("# Starting ICMP/Ping Flood attack...")
print("###########################################\n")
for src in range(1, 254):
# build the packet
network_layer = IP(src=self.src, dst=self.host)
packet = network_layer / ICMP(code=8)
send(packet)
def execute(self):
if self.flood:
self.flood_()
elif self.ping:
self.ping()
| 40.453333 | 82 | 0.636454 | [
"BSD-3-Clause"
] | Syslog777/psak | psak_src/psak_src/exploit_modules/ping.py | 3,034 | Python |
from aiogram.dispatcher.filters.state import StatesGroup, State
class Support(StatesGroup):
add_text = State()
reply_msg = State()
class AdminPanel(StatesGroup):
text = State()
class SendMsg(StatesGroup):
id = State()
msg = State()
class ChangeText(StatesGroup):
text = State()
| 15.5 | 63 | 0.687097 | [
"MIT"
] | Boryslavq/casino_bot | pkg/states/states.py | 310 | Python |
import os
import pyconll
from ufal.udpipe import Model, Pipeline, ProcessingError
class UDPipeToken:
def __init__(self, ud_token, upos=None, tags=None):
self.id = ud_token.id
self.form = ud_token.form
self.upos = ud_token.upos if upos is None else upos
self.lemma = ud_token.lemma
self.tags = [(k + '=' + list(vx)[0]) for k, vx in ud_token.feats.items()] if tags is None else list(tags)
self.deprel = ud_token.deprel
self.head = ud_token.head
def __repr__(self):
return self.form
class UdpipeParser:
def __init__(self):
self.model = None
self.pipeline = None
self.error = None
def load(self, model_path):
if os.path.isfile(model_path):
udp_model_file = model_path
else:
udp_model_file = os.path.join(model_path, 'udpipe_syntagrus.model')
self.model = Model.load(udp_model_file)
self.pipeline = Pipeline(self.model, 'tokenize', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')
self.error = ProcessingError()
def parse_text(self, text):
parsings = []
processed = self.pipeline.process(text, self.error)
if self.error.occurred():
return None
try:
for parsing0 in pyconll.load_from_string(processed):
parsing = []
for token in parsing0:
utoken = token.form.lower()
if utoken in ['чтоб']:
                        # Fix annotation errors made by UDPipe.Syntagrus for some words
parsing.append(UDPipeToken(token, upos='SCONJ', tags=[]))
elif utoken in ['средь']:
parsing.append(UDPipeToken(token, upos='ADP', tags=[]))
else:
parsing.append(UDPipeToken(token))
parsings.append(parsing)
except:
return None
return parsings
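# Illustrative usage sketch -- not part of this module; the model path and the input
# sentence are assumptions.
#
#   parser = UdpipeParser()
#   parser.load('/path/to/udpipe_syntagrus.model')
#   parsings = parser.parse_text('Мама мыла раму.')
#   if parsings:
#       for token in parsings[0]:
#           print(token.form, token.upos, token.tags)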
| 32.95 | 113 | 0.573596 | [
"Unlicense"
] | Koziev/verslibre | py/generative_poetry/udpipe_parser.py | 2,024 | Python |
"""
Laplacian of a compressed-sparse graph
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import isspmatrix, coo_matrix
###############################################################################
# Graph laplacian
def laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
Examples
--------
>>> from scipy.sparse import csgraph
>>> G = np.arange(5) * np.arange(5)[:, np.newaxis]
>>> G
array([[ 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4],
[ 0, 2, 4, 6, 8],
[ 0, 3, 6, 9, 12],
[ 0, 4, 8, 12, 16]])
>>> csgraph.laplacian(G, normed=False)
array([[ 0, 0, 0, 0, 0],
[ 0, 9, -2, -3, -4],
[ 0, -2, 16, -6, -8],
[ 0, -3, -6, 21, -12],
[ 0, -4, -8, -12, 24]])
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = csgraph.astype(np.float)
if isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(
diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = coo_matrix((new_data, (new_row, new_col)), shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = 1 - w_zeros
else:
lap.flat[::n_nodes + 1] = w
if return_diag:
return lap, w
return lap
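# Illustrative usage sketch -- an addition, not part of this module. It reuses the
# graph from the docstring example to request the normalized Laplacian together
# with its diagonal.
#
#   G = np.arange(5) * np.arange(5)[:, np.newaxis]
#   lap, diag = laplacian(G, normed=True, return_diag=True)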
| 32.514493 | 86 | 0.574326 | [
"Apache-2.0"
] | animesh/parliament2 | docker_version/resources/usr/local/lib/python2.7/dist-packages/scipy/sparse/csgraph/_laplacian.py | 4,487 | Python |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.events.models.base import EventBaseModel, EventBaseListModel
class Bandwidth(EventBaseModel):
"""Bandwidth Response Model
@summary: Response model for bandwidth from a compute
event notification
@note: Although the 'public' and 'private' interfaces are
not required, they are the most common names, and are
included as optional attributes for the sake of convenience
@note: This type may contain additional unspecified
BandwidthInterface fields, which will be captured in a
dictionary called kwargs
JSON Example:
{
"private": { <BandwidthInterface> },
"public": { <BandwidthInterface> }
}
"""
kwarg_map = {'private': 'private',
'public': 'public'}
optional_kwargs = ['private', 'public']
strict_checking = False
def __init__(self, private=None, public=None, **kwargs):
super(Bandwidth, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, json_dict):
"""Override dict_to_obj implementation"""
obj = cls._map_values_to_kwargs(json_dict)
for key in obj.kwargs:
obj.kwargs[key] = BandwidthInterface._dict_to_obj(obj.kwargs[key])
if obj.private:
obj.private = BandwidthInterface._dict_to_obj(obj.private)
if obj.public:
obj.public = BandwidthInterface._dict_to_obj(obj.public)
return obj
class BandwidthInterface(EventBaseModel):
"""Bandwidth Interface Response Model
@summary: Response model for bandwidth on an interface from
a compute event notification
@note: Sub-model of Bandwidth
JSON Example:
{
"bw_in": 123456,
"bw_out": 654321
}
"""
kwarg_map = {'bw_in': 'bw_in',
'bw_out': 'bw_out'}
def __init__(self, bw_in, bw_out):
super(BandwidthInterface, self).__init__(locals())
class FixedIp(EventBaseModel):
"""Fixed IP Response Model
@summary: Response model for a fixed IP address from a
compute event notification
@note: Represents a single fixed IP
JSON Example:
{
"address": "10.10.0.0",
"floating_ips": [],
"label": "public",
"meta": {},
"type": "fixed",
"version": 4,
"vif_mac": "FE:ED:FA:00:1C:D4"
}
"""
kwarg_map = {
'address': 'address',
'floating_ips': 'floating_ips',
'label': 'label',
'meta': 'meta',
'type_': 'type',
'version': 'version',
'vif_mac': 'vif_mac'}
def __init__(self, address, floating_ips, label, meta, type_, version,
vif_mac):
super(FixedIp, self).__init__(locals())
class FixedIps(EventBaseListModel):
"""Fixed IPs Model
@summary: Response model for a list of fixed IP addresses
from a compute event notification
@note: Returns a list of elements of type 'FixedIp'
JSON Example:
{
"fixed_ips": [
{ <FixedIp> },
{ <FixedIp> }
]
}
"""
list_model_key = 'fixed_ips'
ObjectModel = FixedIp
class ImageMeta(EventBaseModel):
"""Image Metadata Model
@summary: Response model for image metadata from a compute
event notification
@note: This type may contain additional unspecified
fields, which will be captured in a dictionary called kwargs
JSON Example:
{
"image_meta": {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
}
"""
kwarg_map = {
'auto_disk_config': 'auto_disk_config',
'base_image_ref': 'base_image_ref',
'container_format': 'container_format',
'disk_format': 'disk_format',
'image_type': 'image_type',
'min_disk': 'min_disk',
'min_ram': 'min_ram',
'org_openstack__1__architecture': 'org.openstack__1__architecture',
'org_openstack__1__os_distro': 'org.openstack__1__os_distro',
'org_openstack__1__os_version': 'org.openstack__1__os_version',
'os_type': 'os_type'}
strict_checking = False
def __init__(self, auto_disk_config, base_image_ref, container_format,
disk_format, image_type, min_disk, min_ram,
org_openstack__1__architecture, org_openstack__1__os_distro,
org_openstack__1__os_version, os_type, **kwargs):
super(ImageMeta, self).__init__(locals())
class InstanceException(EventBaseModel):
"""Instance Exception Model
@summary: Response model for an instance exception from a
compute event notification
@note: Represents a single instance exception
JSON Example:
{
"exception": {
"kwargs": {
"instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"reason": "Something broke",
"code": 500
}
}
}
"""
kwarg_map = {'kwargs': 'kwargs'}
def __init__(self, kwargs):
super(InstanceException, self).__init__(locals())
| 30.905473 | 78 | 0.605441 | [
"Apache-2.0"
] | kurhula/cloudcafe | cloudcafe/events/models/compute/common.py | 6,212 | Python |
# -*- coding: utf-8 -*-
"""Test i18n module."""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 791a455ab0a66f7bafcfb71718f915c9dd7b7ab2 $'
import sys
import pywikibot
from pywikibot import i18n, bot, plural
from tests.aspects import unittest, TestCase, DefaultSiteTestCase, PwbTestCase
if sys.version_info[0] == 3:
basestring = (str, )
class TestTranslate(TestCase):
"""Test translate method."""
net = False
def setUp(self):
self.msg_localized = {'en': u'test-localized EN',
'nl': u'test-localized NL',
'fy': u'test-localized FY'}
self.msg_semi_localized = {'en': u'test-semi-localized EN',
'nl': u'test-semi-localized NL'}
self.msg_non_localized = {'en': u'test-non-localized EN'}
self.msg_no_english = {'ja': u'test-no-english JA'}
super(TestTranslate, self).setUp()
def testLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_localized,
fallback=True),
u'test-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_localized,
fallback=True),
u'test-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_localized,
fallback=True),
u'test-localized FY')
def testSemiLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_semi_localized,
fallback=True),
u'test-semi-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
self.assertEqual(i18n.translate('fy', self.msg_semi_localized,
fallback=True),
u'test-semi-localized NL')
def testNonLocalized(self):
self.assertEqual(i18n.translate('en', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('fy', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('nl', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
self.assertEqual(i18n.translate('ru', self.msg_non_localized,
fallback=True),
u'test-non-localized EN')
def testNoEnglish(self):
self.assertEqual(i18n.translate('en', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('fy', self.msg_no_english,
fallback=True),
u'test-no-english JA')
self.assertEqual(i18n.translate('nl', self.msg_no_english,
fallback=True),
u'test-no-english JA')
class UserInterfaceLangTestCase(TestCase):
"""Base class for tests using config.userinterface_lang."""
def setUp(self):
super(UserInterfaceLangTestCase, self).setUp()
self.orig_userinterface_lang = pywikibot.config.userinterface_lang
pywikibot.config.userinterface_lang = self.get_site().code
def tearDown(self):
pywikibot.config.userinterface_lang = self.orig_userinterface_lang
super(UserInterfaceLangTestCase, self).tearDown()
class TWNSetMessagePackageBase(TestCase):
"""Partial base class for TranslateWiki tests."""
message_package = None
def setUp(self):
self.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(self.message_package)
super(TWNSetMessagePackageBase, self).setUp()
def tearDown(self):
super(TWNSetMessagePackageBase, self).tearDown()
i18n.set_messages_package(self.orig_messages_package_name)
class TWNTestCaseBase(TWNSetMessagePackageBase):
"""Base class for TranslateWiki tests."""
@classmethod
def setUpClass(cls):
if not isinstance(cls.message_package, basestring):
raise TypeError('%s.message_package must be a package name'
% cls.__name__)
        # The call to set_messages_package below exists only to confirm
        # that the package exists and messages are available, so
        # that tests can be skipped if the i18n data doesn't exist.
cls.orig_messages_package_name = i18n._messages_package_name
i18n.set_messages_package(cls.message_package)
has_messages = i18n.messages_available()
i18n._messages_package_name = cls.orig_messages_package_name
if not has_messages:
raise unittest.SkipTest("i18n messages package '%s' not available."
% cls.message_package)
super(TWNTestCaseBase, cls).setUpClass()
class TestTWTranslate(TWNTestCaseBase):
"""Test twtranslate method."""
net = False
message_package = 'tests.i18n'
def testLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-localized'),
u'test-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-localized'),
u'test-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-localized'),
u'test-localized FY')
def testSemiLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-semi-localized'),
u'test-semi-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-semi-localized'),
u'test-semi-localized NL')
self.assertEqual(i18n.twtranslate('fy', 'test-semi-localized'),
u'test-semi-localized NL')
def testNonLocalized(self):
self.assertEqual(i18n.twtranslate('en', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('fy', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('nl', 'test-non-localized'),
u'test-non-localized EN')
self.assertEqual(i18n.twtranslate('ru', 'test-non-localized'),
u'test-non-localized EN')
def testNoEnglish(self):
self.assertRaises(i18n.TranslationError, i18n.twtranslate,
'en', 'test-no-english')
class TestTWNTranslate(TWNTestCaseBase):
"""Test {{PLURAL:}} support."""
net = False
message_package = 'tests.i18n'
def testNumber(self):
"""Use a number."""
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 0) % {'num': 0},
u'Bot: Ändere 0 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 1) % {'num': 1},
u'Bot: Ändere 1 Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 2) % {'num': 2},
u'Bot: Ändere 2 Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-plural', 3) % {'num': 3},
u'Bot: Ändere 3 Seiten.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'},
u'Bot: Changing no pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 1) % {'num': 'one'},
u'Bot: Changing one page.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 2) % {'num': 'two'},
u'Bot: Changing two pages.')
self.assertEqual(
i18n.twntranslate('en', 'test-plural', 3) % {'num': 'three'},
u'Bot: Changing three pages.')
def testString(self):
"""Use a string."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'},
u'Bot: Changing one page.')
def testDict(self):
"""Use a dictionary."""
self.assertEqual(
i18n.twntranslate('en', 'test-plural', {'num': 2}),
u'Bot: Changing 2 pages.')
def testExtended(self):
"""Use additional format strings."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
def testExtendedOutside(self):
"""Use additional format strings also outside."""
self.assertEqual(
i18n.twntranslate('fr', 'test-plural', 1) % {'descr': 'seulement'},
u'Robot: Changer seulement une page.')
def testMultiple(self):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 1)
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 2)
% {'action': u'Ändere', 'line': u'zwei'},
u'Bot: Ändere zwei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', 3)
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2, 2))
% {'action': u'Ändere', 'line': u'eine'},
u'Bot: Ändere eine Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', [3, 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["3", 1, 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', "321")
% {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'},
u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 1}),
u'Bot: Ändere 1 Zeile von einer Seite.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': 1, 'page': 2}),
u'Bot: Ändere 1 Zeile von mehreren Seiten.')
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "11", 'page': 2}),
u'Bot: Ändere 11 Zeilen von mehreren Seiten.')
def testMultipleWrongParameterLength(self):
"""Test wrong parameter length."""
with self.assertRaisesRegex(ValueError, "Length of parameter does not match PLURAL occurrences"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', (1, 2))
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von mehreren Seiten.')
with self.assertRaisesRegex(ValueError, "Length of parameter does not match PLURAL occurrences"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["321"])
% {'action': u'Ändere', 'line': u'dreihunderteinundzwanzig'},
u'Bot: Ändere dreihunderteinundzwanzig Zeilen von mehreren Seiten.')
def testMultipleNonNumbers(self):
"""Test error handling for multiple non-numbers."""
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'drei'"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals', ["drei", "1", 1])
% {'action': u'Ändere', 'line': u'drei'},
u'Bot: Ändere drei Zeilen von einer Seite.')
with self.assertRaisesRegex(ValueError, "invalid literal for int\(\) with base 10: 'elf'"):
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'action': u'Ändere', 'line': "elf", 'page': 2}),
u'Bot: Ändere elf Zeilen von mehreren Seiten.')
def testAllParametersExist(self):
with self.assertRaisesRegex(KeyError, repr(u'line')):
# all parameters must be inside twntranslate
self.assertEqual(
i18n.twntranslate('de', 'test-multiple-plurals',
{'line': 1, 'page': 1})
% {'action': u'Ändere'},
u'Bot: Ändere 1 Zeile von einer Seite.')
def test_fallback_lang(self):
"""
Test that twntranslate uses the translation's language.
twntranslate calls _twtranslate which might return the translation for
a different language and then the plural rules from that language need
to be applied.
"""
# co has fr as altlang but has no plural rules defined (otherwise this
# test might not catch problems) so it's using the plural variant for 0
# although French uses the plural variant for numbers > 1 (so not 0)
assert 'co' not in plural.plural_rules
assert plural.plural_rules['fr']['plural'](0) is False
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 0, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
self.assertEqual(
i18n.twntranslate('co', 'test-plural', {'num': 1, 'descr': 'seulement'}),
u'Robot: Changer seulement une page.')
class ScriptMessagesTestCase(TWNTestCaseBase):
"""Real messages test."""
net = False
message_package = 'scripts.i18n'
def test_basic(self):
"""Verify that real messages are able to be loaded."""
self.assertEqual(i18n.twntranslate('en', 'pywikibot-enter-new-text'),
'Please enter the new text:')
def test_missing(self):
"""Test a missing message from a real message bundle."""
self.assertRaises(i18n.TranslationError,
i18n.twntranslate, 'en', 'pywikibot-missing-key')
class InputTestCase(TWNTestCaseBase, UserInterfaceLangTestCase, PwbTestCase):
"""Test i18n.input."""
family = 'wikipedia'
code = 'arz'
message_package = 'scripts.i18n'
@classmethod
def setUpClass(cls):
if cls.code in i18n.twget_keys('pywikibot-enter-category-name'):
raise unittest.SkipTest(
'%s has a translation for %s'
% (cls.code, 'pywikibot-enter-category-name'))
super(InputTestCase, cls).setUpClass()
def test_pagegen_i18n_input(self):
"""Test i18n.input via ."""
result = self._execute(args=['listpages', '-cat'],
data_in='non-existant-category\n',
timeout=5)
self.assertIn('Please enter the category name:', result['stderr'])
class MissingPackageTestCase(TWNSetMessagePackageBase,
UserInterfaceLangTestCase,
DefaultSiteTestCase):
"""Test misssing messages package."""
message_package = 'scripts.foobar.i18n'
def _capture_output(self, text, *args, **kwargs):
self.output_text = text
def setUp(self):
super(MissingPackageTestCase, self).setUp()
self.output_text = ''
self.orig_raw_input = bot.ui._raw_input
self.orig_output = bot.ui.output
bot.ui._raw_input = lambda *args, **kwargs: 'dummy input'
bot.ui.output = self._capture_output
def tearDown(self):
bot.ui._raw_input = self.orig_raw_input
bot.ui.output = self.orig_output
super(MissingPackageTestCase, self).tearDown()
def test_pagegen_i18n_input(self):
"""Test i18n.input falls back with missing message package."""
rv = i18n.input('pywikibot-enter-category-name',
fallback_prompt='dummy output')
self.assertEqual(rv, 'dummy input')
self.assertIn('dummy output: ', self.output_text)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| 40.422062 | 105 | 0.573386 | [
"MIT"
] | xZise/pywikibot-core | tests/i18n_tests.py | 16,847 | Python |
import socket
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("LHCInfoPopulator")
from CondCore.CondDB.CondDB_cfi import *
#process.load("CondCore.DBCommon.CondDBCommon_cfi")
#process.CondDBCommon.connect = 'sqlite_file:lhcinfo_pop_test.db'
#process.CondDBCommon.DBParameters.authenticationPath = '.'
#process.CondDBCommon.DBParameters.messageLevel=cms.untracked.int32(1)
sourceConnection = 'oracle://cms_omds_adg/CMS_RUNINFO_R'
if socket.getfqdn().find('.cms') != -1:
sourceConnection = 'oracle://cms_omds_lb/CMS_RUNINFO_R'
options = VarParsing.VarParsing()
options.register( 'destinationConnection'
, 'sqlite_file:lhcinfo_pop_test.db' #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, "Connection string to the DB where payloads will be possibly written."
)
options.register( 'targetConnection'
, '' #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, """Connection string to the target DB:
if not empty (default), this provides the latest IOV and payloads to compare;
it is the DB where payloads should be finally uploaded."""
)
options.register( 'tag'
, 'LHCInfo_PopCon_start_test'
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.string
, "Tag written in destinationConnection and finally appended in targetConnection."
)
options.register( 'messageLevel'
, 0 #default value
, VarParsing.VarParsing.multiplicity.singleton
, VarParsing.VarParsing.varType.int
, "Message level; default to 0"
)
options.parseArguments()
CondDBConnection = CondDB.clone( connect = cms.string( options.destinationConnection ) )
CondDBConnection.DBParameters.messageLevel = cms.untracked.int32( options.messageLevel )
process.MessageLogger = cms.Service("MessageLogger",
cout = cms.untracked.PSet(threshold = cms.untracked.string('INFO')),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
CondDBConnection,
timetype = cms.untracked.string('timestamp'),
toPut = cms.VPSet(cms.PSet(record = cms.string('LHCInfoRcd'),
tag = cms.string( options.tag )
)
)
)
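# The PopCon analyzer below reads LHC fill/beam conditions from the OMS API and
# the online Oracle schemas configured in its Source PSet, and writes LHCInfo
# payloads for the configured tag through the PoolDBOutputService above.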
process.Test1 = cms.EDAnalyzer("LHCInfoPopConAnalyzer",
SinceAppendMode = cms.bool(True),
record = cms.string('LHCInfoRcd'),
name = cms.untracked.string('LHCInfo'),
Source = cms.PSet(fill = cms.untracked.uint32(6417),
startTime = cms.untracked.string('2018-04-01 00:00:00.000'),
#endTime = cms.untracked.string('2018-03-25 05:00:00.000'),
samplingInterval = cms.untracked.uint32( 600 ),
endFill = cms.untracked.bool(False),
connectionString = cms.untracked.string("oracle://cms_orcon_adg/CMS_RUNTIME_LOGGER"),
ecalConnectionString = cms.untracked.string("oracle://cms_orcon_adg/CMS_DCS_ENV_PVSS_COND"),
DIPSchema = cms.untracked.string("CMS_BEAM_COND"),
omsBaseUrl = cms.untracked.string("http://vocms0184.cern.ch/agg/api/v1"),
#authenticationPath = cms.untracked.string("."),
debug=cms.untracked.bool(False)
),
loggingOn = cms.untracked.bool(True),
IsDestDbCheckedInQueryLog = cms.untracked.bool(False)
)
process.p = cms.Path(process.Test1)
| 55.247191 | 127 | 0.529388 | [
"Apache-2.0"
] | PKUfudawei/cmssw | CondTools/RunInfo/python/LHCInfoPopConAnalyzerStartFill.py | 4,917 | Python |
import string
import numpy as np
import torch as th
from ttools.training import ModelInterface
from . import utils
class VectorizerInterface(ModelInterface):
def __init__(self, model, lr, n_primitives, canvas_size, w_surface, w_alignment, csg, rounded, cuda=True):
self.model = model
self.cuda = cuda
self.n_primitives = n_primitives
self.canvas_size = canvas_size
self.w_surface = w_surface
self.w_alignment = w_alignment
self.csg = csg
self.rounded = rounded
self._step = 0
if self.cuda:
self.model.cuda()
self.optimizer = th.optim.Adam(self.model.parameters(), lr=lr)
def forward(self, batch):
df = batch['distance_fields']
if self.cuda:
df = df.cuda()
params = self.model(df[:,None]).view(df.size(0), self.n_primitives, -1)
params = th.cat([0.35*params[...,:3], params[...,3:]], dim=-1)
df = utils.distance_to_rounded_cuboids if self.rounded else utils.distance_to_cuboids
if self.csg:
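            # CSG mode: the first half of the primitives adds material, the second
            # half subtracts it; the fields are combined as max(d_plus, -d_minus).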
plus_params = params[:,:self.n_primitives//2]
plus_distance_fields = utils.compute_distance_fields(plus_params, self.canvas_size, df=df)
plus_distance_fields = plus_distance_fields.min(1)[0]
minus_params = params[:,self.n_primitives//2:]
minus_distance_fields = utils.compute_distance_fields(minus_params, self.canvas_size, df=df)
minus_distance_fields = minus_distance_fields.min(1)[0]
distance_fields = th.max(plus_distance_fields, -minus_distance_fields) ** 2
else:
distance_fields = utils.compute_distance_fields(params, self.canvas_size, df=df)
distance_fields = distance_fields.min(1)[0] ** 2
alignment_fields = utils.compute_alignment_fields(distance_fields)
distance_fields = distance_fields[...,1:-1,1:-1,1:-1]
occupancy_fields = utils.compute_occupancy_fields(distance_fields)
return {
'distance_fields': distance_fields,
'alignment_fields': alignment_fields,
'occupancy_fields': occupancy_fields
}
def _compute_lossses(self, batch, fwd_data):
ret = {}
target_distance_fields = batch['distance_fields']
target_alignment_fields = batch['alignment_fields']
target_occupancy_fields = batch['occupancy_fields']
if self.cuda:
target_distance_fields = target_distance_fields.cuda()
target_alignment_fields = target_alignment_fields.cuda()
target_occupancy_fields = target_occupancy_fields.cuda()
distance_fields = fwd_data['distance_fields']
alignment_fields = fwd_data['alignment_fields']
occupancy_fields = fwd_data['occupancy_fields']
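        # The surface term pulls predicted and target surfaces onto each other;
        # the alignment term encourages their distance-field gradients to be
        # (anti)parallel.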
surfaceloss = th.mean(target_occupancy_fields*distance_fields + target_distance_fields*occupancy_fields)
alignmentloss = th.mean(1 - th.sum(target_alignment_fields*alignment_fields, dim=-1)**2)
ret['surfaceloss'] = surfaceloss
ret['alignmentloss'] = alignmentloss
loss = self.w_surface*surfaceloss + self.w_alignment*alignmentloss
ret['loss'] = loss
return ret
def training_step(self, batch):
self.model.train()
fwd_data = self.forward(batch)
self.optimizer.zero_grad()
losses_dict = self._compute_lossses(batch, fwd_data)
loss = losses_dict['loss']
loss.backward()
self.optimizer.step()
self._step += 1
return { k: v.item() for k, v in losses_dict.items() }
def init_validation(self):
losses = ['loss', 'surfaceloss', 'alignmentloss']
ret = { l: 0 for l in losses }
ret['count'] = 0
return ret
def validation_step(self, batch, running_data):
self.model.eval()
n = batch['distance_fields'].shape[0]
count = running_data['count']
fwd_data = self.forward(batch)
losses_dict = self._compute_lossses(batch, fwd_data)
loss = losses_dict['loss']
surfaceloss = losses_dict['surfaceloss']
alignmentloss = losses_dict['alignmentloss']
return {
'loss': (running_data['loss']*count + loss.item()*n) / (count+n),
'surfaceloss': (running_data['surfaceloss']*count + surfaceloss.item()*n) / (count+n),
'alignmentloss': (running_data['alignmentloss']*count + alignmentloss.item()*n) / (count+n),
'count': count+n
}
| 37.07377 | 112 | 0.644263 | [
"MIT"
] | dmsm/DeepParametricShapes | dps_3d/interfaces.py | 4,523 | Python |
"""
CSVFileWriter writes power values to a CSV file.
"""
__author__ = 'Md Shifuddin Al Masud'
__email__ = '[email protected]'
__license__ = 'MIT License'
from pv_simulator.FileWriter import FileWriter
import csv
from datetime import datetime
import aiofiles
from aiocsv import AsyncWriter
import logging
class CSVFileWriter(FileWriter):
__destination = ""
def __init__(self, destination):
"""
        :param destination: path of the CSV file that rows are appended to
"""
self.__destination = destination
async def write(self, timestamp: datetime, meter_power_value: int, simulator_power_value: int,
combined_power_value: int) -> None:
"""
        Appends one row of power values to the CSV file.
        :param timestamp: time of the measurement
        :param meter_power_value: power value reported by the meter
        :param simulator_power_value: power value produced by the PV simulator
        :param combined_power_value: combined power value of meter and simulator
        :return: None
"""
async with aiofiles.open(self.__destination, mode='a') as csv_file:
csv_file_writer = AsyncWriter(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
await csv_file_writer.writerow([datetime.now(), meter_power_value, simulator_power_value,
combined_power_value])
logging.debug("%s, %s, %s, %s are writen to %s", datetime.now(), meter_power_value, simulator_power_value,
combined_power_value, self.__destination)
| 34.170732 | 114 | 0.65596 | [
"MIT"
] | Shifuddin/PV-Simulator-Challenge | pv_simulator/CSVFileWriter.py | 1,401 | Python |
'''
This function writes the given frames into a video at save_original_path, optionally
resized according to scale. data_path is only used to probe the resolution of the
original video when scale is a single resize factor.
inputs :
    - frames = list of frames i.e. numpy arrays
    - scale = resize factor (int/float) or an explicit (height, width) tuple
- data_path = path to original data, string
- save_original_path = path to save scaled video, string
- Detrac = For development purposes only, True
outputs : None
'''
import cv2
import os
import imageio
from glob import glob
from tqdm import tqdm
def write_original_video(frames, scale=1, data_path='', save_original_path='', Detrac=False):
    if Detrac:
        # Development-only path: stitch the frames of the first DETRAC image
        # sequence into a fixed-resolution video.
        width = 960
        height = 540
        fourcc = cv2.VideoWriter_fourcc(*'MP42')
        for i in os.listdir('DETRAC-Images/'):
            video = cv2.VideoWriter('./DETRAC_video.avi', fourcc, 30, (width, height))
            for j in range(1, len(os.listdir('DETRAC-Images/' + i + '/')) + 1):
                # frames are named img00001.jpg, img00002.jpg, ...
                k = "img" + str(j).zfill(5) + ".jpg"
                img = imageio.imread('DETRAC-Images/' + i + '/' + k)
                video.write(img)
            video.release()
            cv2.destroyAllWindows()
            break  # only the first sequence is needed for development
else:
c = 0
fourcc = cv2.VideoWriter_fourcc(*'MP42')
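        # Output resolution: either an explicit (height, width) tuple, or the
        # resolution of the first frame of the source video multiplied by scale.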
if type(scale) == tuple:
width = scale[1]
height = scale[0]
else:
cap = cv2.VideoCapture(data_path)
ret, img_for_shape = cap.read()
cap.release()
width = float(scale) * int(img_for_shape.shape[1])
height = float(scale) * int(img_for_shape.shape[0])
width = int(width)
height = int(height)
video = cv2.VideoWriter(save_original_path,fourcc,30, (width,height))
print()
for img in tqdm(list(frames)):
img = cv2.resize( img , (width, height))
video.write(img)
video.release()
cv2.destroyAllWindows()
| 30.289474 | 105 | 0.532146 | [
"Apache-2.0"
] | alilakda/Eva | storage/compression/write_original_video.py | 2,302 | Python |
"""Tests for _data_finder.py."""
import os
import shutil
import tempfile
import pytest
import yaml
import esmvalcore._config
from esmvalcore._data_finder import (get_input_filelist, get_input_fx_filelist,
get_output_file)
from esmvalcore.cmor.table import read_cmor_tables
# Initialize with standard config developer file
esmvalcore._config.CFG = esmvalcore._config.read_config_developer_file()
# Initialize CMOR tables
read_cmor_tables(esmvalcore._config.CFG)
# Load test configuration
with open(os.path.join(os.path.dirname(__file__), 'data_finder.yml')) as file:
CONFIG = yaml.safe_load(file)
def print_path(path):
"""Print path."""
txt = path
if os.path.isdir(path):
txt += '/'
if os.path.islink(path):
txt += ' -> ' + os.readlink(path)
print(txt)
def tree(path):
"""Print path, similar to the the `tree` command."""
print_path(path)
for dirpath, dirnames, filenames in os.walk(path):
for dirname in dirnames:
print_path(os.path.join(dirpath, dirname))
for filename in filenames:
print_path(os.path.join(dirpath, filename))
def create_file(filename):
"""Create an empty file."""
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(filename, 'a'):
pass
def create_tree(path, filenames=None, symlinks=None):
"""Create directory structure and files."""
for filename in filenames or []:
create_file(os.path.join(path, filename))
for symlink in symlinks or []:
link_name = os.path.join(path, symlink['link_name'])
os.symlink(symlink['target'], link_name)
@pytest.mark.parametrize('cfg', CONFIG['get_output_file'])
def test_get_output_file(cfg):
"""Test getting output name for preprocessed files."""
output_file = get_output_file(cfg['variable'], cfg['preproc_dir'])
assert output_file == cfg['output_file']
@pytest.fixture
def root():
"""Root function for tests."""
dirname = tempfile.mkdtemp()
yield os.path.join(dirname, 'output1')
print("Directory structure was:")
tree(dirname)
shutil.rmtree(dirname)
@pytest.mark.parametrize('cfg', CONFIG['get_input_filelist'])
def test_get_input_filelist(root, cfg):
"""Test retrieving input filelist."""
create_tree(root, cfg.get('available_files'),
cfg.get('available_symlinks'))
# Find files
rootpath = {cfg['variable']['project']: [root]}
drs = {cfg['variable']['project']: cfg['drs']}
input_filelist = get_input_filelist(cfg['variable'], rootpath, drs)
# Test result
reference = [os.path.join(root, file) for file in cfg['found_files']]
assert sorted(input_filelist) == sorted(reference)
@pytest.mark.parametrize('cfg', CONFIG['get_input_fx_filelist'])
def test_get_input_fx_filelist(root, cfg):
"""Test retrieving fx filelist."""
create_tree(root, cfg.get('available_files'),
cfg.get('available_symlinks'))
# Find files
rootpath = {cfg['variable']['project']: [root]}
drs = {cfg['variable']['project']: cfg['drs']}
fx_files = get_input_fx_filelist(cfg['variable'], rootpath, drs)
# Test result
reference = {
fx_var: os.path.join(root, filename) if filename else None
for fx_var, filename in cfg['found_files'].items()
}
assert fx_files == reference
| 30.04386 | 79 | 0.670949 | [
"Apache-2.0"
] | Peter9192/ESMValCore | tests/integration/test_data_finder.py | 3,425 | Python |
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
`mode='constant`. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
assumed that the matrix is diagonal. A more efficient algorithms
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
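        # diagonal matrix: exploit separability and use the faster zoom/shift kernel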
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
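    # the transform maps output coordinates to input coordinates, so the
    # requested shift has to be negated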
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
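    # recompute the zoom factors so that the first and last grid points of the
    # output map exactly onto the first and last grid points of the input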
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
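        # rotate the corners of the input plane and take their bounding box
        # as the output size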
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
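    # choose the offset so that the rotation is performed about the centres
    # of the input and output planes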
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
| 42.317992 | 77 | 0.635159 | [
"BSD-3-Clause"
] | kapteyn-astro/kapteyn | kapteyn/interpolation.py | 20,228 | Python |
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
OpenAPI spec version: v1alpha3-0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.models.v1alpha3_algorithm_spec import V1alpha3AlgorithmSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_metrics_collector_spec import V1alpha3MetricsCollectorSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_nas_config import V1alpha3NasConfig # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_objective_spec import V1alpha3ObjectiveSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_parameter_spec import V1alpha3ParameterSpec # noqa: F401,E501
from kubeflow.katib.models.v1alpha3_trial_template import V1alpha3TrialTemplate # noqa: F401,E501
class V1alpha3ExperimentSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'algorithm': 'V1alpha3AlgorithmSpec',
'max_failed_trial_count': 'int',
'max_trial_count': 'int',
'metrics_collector_spec': 'V1alpha3MetricsCollectorSpec',
'nas_config': 'V1alpha3NasConfig',
'objective': 'V1alpha3ObjectiveSpec',
'parallel_trial_count': 'int',
'parameters': 'list[V1alpha3ParameterSpec]',
'resume_policy': 'str',
'trial_template': 'V1alpha3TrialTemplate'
}
attribute_map = {
'algorithm': 'algorithm',
'max_failed_trial_count': 'maxFailedTrialCount',
'max_trial_count': 'maxTrialCount',
'metrics_collector_spec': 'metricsCollectorSpec',
'nas_config': 'nasConfig',
'objective': 'objective',
'parallel_trial_count': 'parallelTrialCount',
'parameters': 'parameters',
'resume_policy': 'resumePolicy',
'trial_template': 'trialTemplate'
}
def __init__(self, algorithm=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None): # noqa: E501
"""V1alpha3ExperimentSpec - a model defined in Swagger""" # noqa: E501
self._algorithm = None
self._max_failed_trial_count = None
self._max_trial_count = None
self._metrics_collector_spec = None
self._nas_config = None
self._objective = None
self._parallel_trial_count = None
self._parameters = None
self._resume_policy = None
self._trial_template = None
self.discriminator = None
if algorithm is not None:
self.algorithm = algorithm
if max_failed_trial_count is not None:
self.max_failed_trial_count = max_failed_trial_count
if max_trial_count is not None:
self.max_trial_count = max_trial_count
if metrics_collector_spec is not None:
self.metrics_collector_spec = metrics_collector_spec
if nas_config is not None:
self.nas_config = nas_config
if objective is not None:
self.objective = objective
if parallel_trial_count is not None:
self.parallel_trial_count = parallel_trial_count
if parameters is not None:
self.parameters = parameters
if resume_policy is not None:
self.resume_policy = resume_policy
if trial_template is not None:
self.trial_template = trial_template
@property
def algorithm(self):
"""Gets the algorithm of this V1alpha3ExperimentSpec. # noqa: E501
Describes the suggestion algorithm. # noqa: E501
:return: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3AlgorithmSpec
"""
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
"""Sets the algorithm of this V1alpha3ExperimentSpec.
Describes the suggestion algorithm. # noqa: E501
:param algorithm: The algorithm of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3AlgorithmSpec
"""
self._algorithm = algorithm
@property
def max_failed_trial_count(self):
"""Gets the max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max failed trials to mark experiment as failed. # noqa: E501
:return: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_failed_trial_count
@max_failed_trial_count.setter
def max_failed_trial_count(self, max_failed_trial_count):
"""Sets the max_failed_trial_count of this V1alpha3ExperimentSpec.
Max failed trials to mark experiment as failed. # noqa: E501
:param max_failed_trial_count: The max_failed_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._max_failed_trial_count = max_failed_trial_count
@property
def max_trial_count(self):
"""Gets the max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
Max completed trials to mark experiment as succeeded # noqa: E501
:return: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._max_trial_count
@max_trial_count.setter
def max_trial_count(self, max_trial_count):
"""Sets the max_trial_count of this V1alpha3ExperimentSpec.
Max completed trials to mark experiment as succeeded # noqa: E501
:param max_trial_count: The max_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._max_trial_count = max_trial_count
@property
def metrics_collector_spec(self):
"""Gets the metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:return: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3MetricsCollectorSpec
"""
return self._metrics_collector_spec
@metrics_collector_spec.setter
def metrics_collector_spec(self, metrics_collector_spec):
"""Sets the metrics_collector_spec of this V1alpha3ExperimentSpec.
For v1alpha3 we will keep the metrics collector implementation same as v1alpha1. # noqa: E501
:param metrics_collector_spec: The metrics_collector_spec of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3MetricsCollectorSpec
"""
self._metrics_collector_spec = metrics_collector_spec
@property
def nas_config(self):
"""Gets the nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:return: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3NasConfig
"""
return self._nas_config
@nas_config.setter
def nas_config(self, nas_config):
"""Sets the nas_config of this V1alpha3ExperimentSpec.
:param nas_config: The nas_config of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3NasConfig
"""
self._nas_config = nas_config
@property
def objective(self):
"""Gets the objective of this V1alpha3ExperimentSpec. # noqa: E501
Describes the objective of the experiment. # noqa: E501
:return: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3ObjectiveSpec
"""
return self._objective
@objective.setter
def objective(self, objective):
"""Sets the objective of this V1alpha3ExperimentSpec.
Describes the objective of the experiment. # noqa: E501
:param objective: The objective of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3ObjectiveSpec
"""
self._objective = objective
@property
def parallel_trial_count(self):
"""Gets the parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:return: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: int
"""
return self._parallel_trial_count
@parallel_trial_count.setter
def parallel_trial_count(self, parallel_trial_count):
"""Sets the parallel_trial_count of this V1alpha3ExperimentSpec.
How many trials can be processed in parallel. Defaults to 3 # noqa: E501
:param parallel_trial_count: The parallel_trial_count of this V1alpha3ExperimentSpec. # noqa: E501
:type: int
"""
self._parallel_trial_count = parallel_trial_count
@property
def parameters(self):
"""Gets the parameters of this V1alpha3ExperimentSpec. # noqa: E501
List of hyperparameter configurations. # noqa: E501
:return: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: list[V1alpha3ParameterSpec]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1alpha3ExperimentSpec.
List of hyperparameter configurations. # noqa: E501
:param parameters: The parameters of this V1alpha3ExperimentSpec. # noqa: E501
:type: list[V1alpha3ParameterSpec]
"""
self._parameters = parameters
@property
def resume_policy(self):
"""Gets the resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:return: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: str
"""
return self._resume_policy
@resume_policy.setter
def resume_policy(self, resume_policy):
"""Sets the resume_policy of this V1alpha3ExperimentSpec.
Describes resuming policy which usually take effect after experiment terminated. # noqa: E501
:param resume_policy: The resume_policy of this V1alpha3ExperimentSpec. # noqa: E501
:type: str
"""
self._resume_policy = resume_policy
@property
def trial_template(self):
"""Gets the trial_template of this V1alpha3ExperimentSpec. # noqa: E501
Template for each run of the trial. # noqa: E501
:return: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:rtype: V1alpha3TrialTemplate
"""
return self._trial_template
@trial_template.setter
def trial_template(self, trial_template):
"""Sets the trial_template of this V1alpha3ExperimentSpec.
Template for each run of the trial. # noqa: E501
:param trial_template: The trial_template of this V1alpha3ExperimentSpec. # noqa: E501
:type: V1alpha3TrialTemplate
"""
self._trial_template = trial_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1alpha3ExperimentSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha3ExperimentSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 34.544 | 251 | 0.661958 | [
"Apache-2.0"
] | ChenjunZou/katib | sdk/python/v1alpha3/kubeflow/katib/models/v1alpha3_experiment_spec.py | 12,954 | Python |
## utility functions
## including: labelling, annotation, continuous borders
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
## create labels
def generate_class_label(data):
"""
generates class label on a copy of data using the columns
State, From_X, From_Y, To_X, To_Y
"""
r_data = data.copy()
r_data['target'] = \
r_data.State.astype(np.str) + "_"+ \
r_data.From_X.astype(np.str)+ "," + r_data.From_Y.astype(np.str)+ "_" + \
r_data.To_X.astype(np.str)+ "," + r_data.To_Y.astype(np.str)
return r_data
def generate_class_label_and_drop(data):
"""
    generates the class label on a copy of data from the columns
    State, From_X, From_Y, To_X, To_Y, then drops those source columns
    (plus ID and Rng_ID) and removes Pause/Enter/Leave rows
"""
r_data = data.copy()
r_data['target'] = \
r_data.State.astype(np.str) + "_"+ \
r_data.From_X.astype(np.str)+ "," + r_data.From_Y.astype(np.str)+ "_" + \
r_data.To_X.astype(np.str)+ "," + r_data.To_Y.astype(np.str)
r_data = r_data.drop('From_X', 1)
r_data = r_data.drop('From_Y', 1)
r_data = r_data.drop('To_Y', 1)
r_data = r_data.drop('To_X', 1)
r_data = r_data.drop('State', 1)
r_data = r_data.drop('ID', 1)
r_data = r_data.drop('Rng_ID', 1)
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
return r_data.reset_index()
def generate_class_label_presence(data, state_variable ="target"):
"""
    generates a presence-only class label on a copy of data:
    removes Pause/Enter/Leave rows and merges 'Step', 'Stand' and 'Walk'
    into 'Present'; everything else becomes 'Not Present'
"""
r_data = data.copy()
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Stand"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Walk"), 'target' ] = "Present"
# remove enter and leave
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
r_data.loc[ ~r_data['target'].str.contains("Present"), 'target' ] = "Not Present"
return r_data.reset_index()
def generate_class_label_dyn_vs_empty(data, state_variable ="target"):
"""
    generates a dynamic-vs-empty class label on a copy of data:
    removes Pause/Enter/Stand/Leave rows, merges 'Walk' and 'Step' into
    'Present' and maps 'Empty' to 'Not Present'
"""
r_data = data.copy()
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data.loc[ r_data['target'].str.contains("Walk"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Present"
r_data.loc[ r_data['target'].str.contains("Empty"), 'target' ] = "Not Present"
# remove enter and leave
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Stand") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
return r_data.reset_index()
def generate_class_label_presence_and_dynamic(data, state_variable ="State"):
"""
    generates a class label for presence and movement type on a copy of data:
    removes Pause/Enter/Leave rows and maps the state to one of
    'Step', 'Walk', 'Stand' or 'Empty'
"""
r_data = data.copy()
r_data['target'] = r_data[state_variable].astype(np.str)
r_data = r_data[ ~r_data['target'].str.contains("Pause") ]
r_data = r_data[ ~r_data['target'].str.contains("Enter") ]
r_data = r_data[ ~r_data['target'].str.contains("Leave") ]
r_data.loc[ r_data['target'].str.contains("Step"), 'target' ] = "Step"
r_data.loc[ r_data['target'].str.contains("Walki"), 'target' ] = "Walk"
r_data.loc[ r_data['target'].str.contains("Stand"), 'target' ] = "Stand"
r_data.loc[ r_data['target'].str.contains("Empty"), 'target' ] = "Empty"
return r_data
def get_contigous_borders(indices):
"""
helper function to derive contiguous borders from a list of indices
Parameters
----------
    indices : all indices at which a certain thing occurs
Returns
-------
    list of [start, end] groups (note: end is the real last element of the group, _not_ end+1)
"""
r =[ [indices[0]] ]
prev = r[0][0]
for ix,i in enumerate(indices):
        # distance between the last occurrence and the current index > 1
        # means there is a gap, so close the previous group
if (i - prev) > 1:
# add end
r[-1].append(indices[ix-1])
# add new start
r.append([ indices[ix] ])
prev = i
r[-1].append( indices[-1] )
return r
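# Usage sketch (added for illustration; the index list below is made up):
# contiguous runs in a sorted index list collapse to [start, end] pairs.
def _example_contigous_borders():
    indices = [1, 2, 3, 7, 8, 12]
    # expected result: [[1, 3], [7, 8], [12, 12]]
    return get_contigous_borders(indices)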
def get_contiguous_activity_borders(data, label):
"""
    returns a dict mapping each value of the label column to its list of [start, end] index groups
"""
labels = data[label].unique()
r = {}
for l in labels:
a = data[data[label] == l].index.values
r[l] = get_contigous_borders(a)
r['length'] = data.shape[0]
return(r)
def annotate(a):
"""
    draws annotations into a sns heatmap using plt.annotate
    a : dictionary with activity names and borders (as returned by get_contiguous_activity_borders)
"""
min_length = 4
for k in a.keys():
if k == "length":
continue
borders = a[k]
for s,e in borders:
# need to correct for coordinates starting at 0,0
s_r = a['length'] - s
e_r = a['length'] - e
#print(s_r, e_r)
plt.annotate("",
xy=(4, s_r), xycoords='data',
xytext=(4, e_r), textcoords='data',
arrowprops=dict(shrink=0.0, headwidth=10.0, headlength=1.0, width=0.25, shrinkA=0.0, shrinkB=0.0 )
#arrowprops=dict(arrowstyle="|-|",
# connectionstyle="arc3"),
)
# only write text if enough space available
if s_r - e_r < min_length:
continue
plt.annotate(k,
xy=(7, s_r-((s_r-e_r)//2)-min_length//2), xycoords='data',
xytext=(7, s_r-((s_r-e_r)//2)-min_length//2), textcoords='data',
size=9
)
def get_trx_groups(data, group_key="_ifft_0"):
lst = data.columns[data.columns.str.contains(group_key)]
groups = [ [x[:-2]] for x in lst]
return groups | 31.747664 | 123 | 0.568737 | [
"MIT"
] | hhain/sdap17 | notebooks/pawel_ueb2/utility.py | 6,794 | Python |
# Script:
#
# remove all articles from the DB which have no
# references to them and are older than a number of days
#
# works with the db that is defined in the configuration
# pointed by ZEEGUU_CORE_CONFIG
#
# takes as argument the number of days before which the
# articles will be deleted.
#
# call like this to remove all articles older than 90 days
#
#
# python remove_unreferenced_articles.py 90
#
#
#
from zeeguu_core.model import Article, UserArticle, UserActivityData
from zeeguu_core import db
dbs = db.session
import sys
try:
DAYS = int(sys.argv[1])
except:
print ("\nOOOPS: you must provide a number of days before which the articles to be deleted\n")
exit(-1)
deleted = []
print("1. finding urls in activity data...")
all_urls = set()
all_activity_data = UserActivityData.query.all()
for each in all_activity_data:
url = each.find_url_in_extra_data()
if url:
all_urls.add(url)
print(f" ... url count: {len(all_urls)}")
#
print(f"2. finding articles older than {DAYS} days...")
all_articles = Article.all_older_than(days=DAYS)
print(f" ... article count: {len(all_articles)}")
i = 0
for each in all_articles:
i += 1
info = UserArticle.find_by_article(each)
url_found = each.url.as_string() in all_urls
if info or url_found:
if info:
print(f"WON'T DELETE info! {each.id} {each.title}")
for ainfo in info:
print(ainfo.user_info_as_string())
if url_found:
print(f"WON'T DELETE url_found! {each.id} {each.title}")
else:
deleted.append(each.id)
dbs.delete(each)
if i == 1000:
dbs.commit()
i = 0
dbs.commit()
print(f'Deleted: {deleted}')
| 22.88 | 98 | 0.666667 | [
"MIT"
] | simonchristensen1/Zeeguu-Core | tools/remove_unreferenced_articles.py | 1,716 | Python |
# coding: utf8
from __future__ import unicode_literals
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .tag_map import TAG_MAP
from .morph_rules import MORPH_RULES
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
def _return_lt(_):
return "lt"
class LithuanianDefaults(Language.Defaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = _return_lt
lex_attr_getters[NORM] = add_lookups(
Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
)
lex_attr_getters.update(LEX_ATTRS)
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
stop_words = STOP_WORDS
tag_map = TAG_MAP
morph_rules = MORPH_RULES
class Lithuanian(Language):
lang = "lt"
Defaults = LithuanianDefaults
__all__ = ["Lithuanian"]
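# Minimal usage sketch (added for illustration; the sample sentence is arbitrary):
if __name__ == "__main__":
    nlp = Lithuanian()
    doc = nlp("Sveikas, pasauli!")
    print([token.text for token in doc])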
| 25.560976 | 76 | 0.780534 | [
"MIT"
] | AbhishekSinhaCoder/spaCy | spacy/lang/lt/__init__.py | 1,048 | Python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of the confirmation_aw3 skill."""
from aea.configurations.base import PublicId
PUBLIC_ID = PublicId.from_str("fetchai/confirmation_aw3:0.3.0")
| 37.884615 | 80 | 0.617259 | [
"Apache-2.0"
] | marcofavorito/agents-aea | packages/fetchai/skills/confirmation_aw3/__init__.py | 985 | Python |
import pytest
from django.test import TestCase
from .factories import CoopTypeFactory, CoopFactory, AddressFactory, PhoneContactMethodFactory
from directory.models import Coop, CoopType
class ModelTests(TestCase):
@classmethod
def setUpTestData(cls):
print("setUpTestData: Run once to set up non-modified data for all class methods.")
#management.call_command('loaddata', 'test_data.yaml', verbosity=0)
pass
@pytest.mark.django_db
def test_phone_create(self):
""" Test phone contact method """ # create phone instance
phone_num = "7739441467"
phone = PhoneContactMethodFactory.create(phone=phone_num)
assert phone_num == phone.phone
assert phone.id is not None
@pytest.mark.django_db
def test_phone_create_invalid_num(self):
""" Test phone contact method """ # create phone instance
phone_num = "abcdefsfdsf"
phone = PhoneContactMethodFactory.create(phone=phone_num)
assert phone_num == phone.phone
assert phone.id is not None
print("\n\n\n\n-------------id is ", id)
@pytest.mark.django_db
def test_coop_type_create(self):
""" Test coop type model """ # create customer model instance
coop_type = CoopTypeFactory(name="Test Coop Type Name")
assert coop_type.name == "Test Coop Type Name"
@pytest.mark.django_db
def test_address_create(self):
""" Test address model """ # create customer model instance
address = AddressFactory()
assert address is not None
@pytest.mark.django_db
def test_coop_create(self):
""" Test customer model """ # create customer model instance
coop_from_factory = CoopFactory()
self.assertIsNotNone(coop_from_factory)
coop = Coop.objects.create(name='test')
coop.addresses.set(coop_from_factory.addresses.all())
self.assertIsNotNone(coop)
@pytest.mark.django_db
def test_coop_create_with_existing_type(self):
""" Test customer model """ # create customer model instance
coop_from_factory = CoopFactory()
self.assertIsNotNone(coop_from_factory)
coop_types = coop_from_factory.types
coop = CoopFactory.create(types=[coop_types.all().first()], addresses=coop_from_factory.addresses.all())
self.assertIsNotNone(coop)
@pytest.mark.django_db
def test_coop_create_with_no_types(self):
""" Test customer model """ # create customer model instance
print("\n\n\n\n**********-------- starting test ....\n")
coop = CoopFactory.build(types=[])
print("phone:",coop.phone.phone)
print("email:",coop.email.email)
coop.full_clean()
self.assertIsNotNone(coop)
self.assertIsNone( coop.id )
def test_search_coops_wo_coords(self):
"""
Look for coops with addresses without latitude/longitude coords
"""
address = AddressFactory(latitude=None, longitude=None)
coop_from_factory = CoopFactory(addresses=[address])
# Verify coop appears when we search for those without a lat/lon
coops = Coop.objects.find_wo_coords()
results = list(coops)
assert len(results) > 0, "Failed to find any matching results."
assert coop_from_factory in list(coops), "Failed to find coop."
| 38.850575 | 112 | 0.664201 | [
"MIT"
] | DaveyDevs/maps | web/tests/test_models.py | 3,380 | Python |
import json
from io import BytesIO
from six import text_type
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel(object):
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode('utf8'))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
@property
def access_logger(self):
class FakeLogger:
def info(self, *args, **kwargs):
pass
return FakeLogger()
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
Returns:
A synapse.http.site.SynapseRequest.
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
if not isinstance(path, bytes):
path = path.encode('ascii')
# Decorate it to be the full path, if we're using shorthand
if shorthand and not path.startswith(b"/_matrix"):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if isinstance(content, text_type):
content = content.encode('utf8')
site = FakeSite()
channel = FakeChannel(reactor)
req = request(site, channel)
req.process = lambda: b""
req.content = BytesIO(content)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode('ascii')
)
if content:
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
def wait_until_result(clock, request, timeout=100):
"""
Wait until the request is finished.
"""
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
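# Illustrative helper (not part of the original utilities): a typical round
# trip wires get_clock(), make_request() and render() together for a JSON
# resource under test; the resource argument and request path are assumptions.
def _example_request_roundtrip(resource):
    reactor, _hs_clock = get_clock()
    request, channel = make_request(reactor, b"GET", b"/_matrix/client/r0/versions")
    render(request, resource, reactor)
    return channel.code, channel.json_body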
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self._udp = []
self.lookups = {}
class Resolver(object):
def resolveHostName(
_self,
resolutionReceiver,
hostName,
portNumber=0,
addressTypes=None,
transportSemantics='TCP',
):
resolution = HostResolution(hostName)
resolutionReceiver.resolutionBegan(resolution)
if hostName not in self.lookups:
raise DNSLookupError("OH NO")
resolutionReceiver.addressResolved(
IPv4Address('TCP', self.lookups[hostName], portNumber)
)
resolutionReceiver.resolutionComplete()
return resolution
self.nameResolver = Resolver()
super(ThreadedMemoryReactorClock, self).__init__()
def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
"""
Set up a synchronous test server, driven by the reactor used by
the homeserver.
"""
d = _sth(cleanup_func, *args, **kwargs).result
if isinstance(d, Failure):
d.raiseException()
# Make the thread pool synchronous.
clock = d.get_clock()
pool = d.get_db_pool()
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
class ThreadPool:
"""
Threadless thread pool.
"""
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
clock._reactor.callLater(0, d.callback, True)
return d
clock.threadpool = ThreadPool()
pool.threadpool = ThreadPool()
pool.running = True
return d
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return (clock, hs_clock)
@attr.s
class FakeTransport(object):
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
disconnecting = False
buffer = attr.ib(default=b'')
producer = attr.ib(default=None)
def getPeer(self):
return None
def getHost(self):
return None
def loseConnection(self):
self.disconnecting = True
def abortConnection(self):
self.disconnecting = True
def pauseProducing(self):
self.producer.pauseProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
d = self.producer.resumeProducing()
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
self.buffer = self.buffer + byt
def _write():
if getattr(self.other, "transport") is not None:
self.other.dataReceived(self.buffer)
self.buffer = b""
return
self._reactor.callLater(0.0, _write)
_write()
def writeSequence(self, seq):
for x in seq:
self.write(x)
| 26.29803 | 84 | 0.615341 | [
"Apache-2.0"
] | AlohaHealth/synapse | tests/server.py | 10,677 | Python |
import sys
import pwnlib
from pwnlib.context import context
pwnlib.log.console.stream = sys.stderr
choices = list(map(str, [16, 32, 64]))
choices += list(context.oses)
choices += list(context.architectures)
choices += list(context.endiannesses)
def context_arg(arg):
try:
context.arch = arg
except Exception:
pass
try:
context.os = arg
except Exception:
pass
try:
context.bits = int(arg)
except Exception:
pass
try:
context.endian = arg
except Exception:
pass
return arg
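# Examples (illustrative): context_arg('amd64') sets context.arch, while
# context_arg('64') only sets context.bits; values that match nothing are
# still returned unchanged so argparse can record them.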
| 16.542857 | 38 | 0.623489 | [
"MIT"
] | IMULMUL/python3-pwntools | pwnlib/commandline/common.py | 579 | Python |
import torch.nn as nn
import torch.nn.functional as F
class RNNAgent(nn.Module):
def __init__(self, input_shape, args):
super(RNNAgent, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
if not args.levin_flag_quantile:
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
else:
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions * args.N_QUANT)
def init_hidden(self):
# make hidden states on same device as model
        # mainly used in the controllers
return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
def forward(self, inputs, hidden_state):
mb_size = inputs.size(0)
x = F.relu(self.fc1(inputs))
h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
h = self.rnn(x, h_in)
if not self.args.levin_flag_quantile:
q = self.fc2(h)
else:
q = self.fc2(h).view(mb_size, self.args.n_actions, self.args.N_QUANT)
return q, h
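# Minimal smoke-test sketch (the args fields below are assumptions that mirror
# the attributes read above; in the real framework they come from the config).
if __name__ == "__main__":
    from types import SimpleNamespace
    import torch
    args = SimpleNamespace(rnn_hidden_dim=64, n_actions=5,
                           levin_flag_quantile=False, N_QUANT=1)
    agent = RNNAgent(input_shape=10, args=args)
    hidden = agent.init_hidden().repeat(4, 1)  # batch of 4 agents
    q, hidden_next = agent(torch.randn(4, 10), hidden)
    print(q.shape, hidden_next.shape)  # torch.Size([4, 5]) torch.Size([4, 64])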
| 35.125 | 84 | 0.637011 | [
"Apache-2.0"
] | halleanwoo/AGMA | src_convention/modules/agents/rnn_agent.py | 1,138 | Python |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class TutorialSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TutorialDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 34.605769 | 78 | 0.666296 | [
"MIT"
] | 15217718797/Movie_Recommend | tutorial/tutorial/middlewares.py | 3,601 | Python |
from keras.engine.topology import Layer
from keras.backend.tensorflow_backend import tf
class Multiplexer(Layer):
def __init__(self, output_dim, nb_ctrl_sig, **kwargs):
"""
This layer is used to split the output of a previous Dense layer into
nb_ctrl_sig groups of size output_dim, and choose which group to provide
as output using a discrete control signal.
It takes as input two tensors, namely the output of the previous layer
and a column tensor with int32 or int64 values for the control signal.
The Dense input to this layer must be of shape (None, prev_output_dim),
where prev_output_dim = output_dim * nb_ctrl_sig.
No checks are done at runtime to ensure that the input to the layer is
correct, so it's better to double check.
An example usage of this layer may be:
input = Input(shape=(3,))
control = Input(shape=(1,), dtype='int32')
hidden = Dense(6)(i) # output_dim == 2, nb_ctrl_sig == 3
output = Multiplexer(2, 3)([hidden, control])
model = Model(input=[input, control], output=output)
...
x = randn(3) # Input has size 3
ctrl = array([0, 1, 2])
# Outputs the first two neurons of the Dense layer
model.predict([x, ctrl[0]])
# Outputs the middle two neurons of the Dense layer
model.predict([x, ctrl[1]])
# Outputs the last two neurons of the Dense layer
model.predict([x, ctrl[2]])
# Arguments
output_dim: positive integer, dimensionality of the output space.
nb_ctrl_sig: positive integer, number of groups in which to split
the output of the previous layer. Must satisfy the relation:
input_size = nb_ctrl_sig * output_dim
"""
self.output_dim = output_dim
self.nb_ctrl_sig = nb_ctrl_sig
super(Multiplexer, self).__init__(**kwargs)
def build(self, input_shape):
super(Multiplexer, self).build(input_shape)
def call(self, args, mask=None):
return self.multiplexer(args, self.output_dim, self.nb_ctrl_sig)
def get_output_shape_for(self, input_shape):
return input_shape[0], self.output_dim
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.output_dim
return tuple(output_shape)
@staticmethod
def multiplexer(args, output_size, nb_actions):
"""
Returns a tensor of shape (None, output_size) where each sample is
the result of masking each sample in full_input with a binary mask that
preserves only output_size elements, based on the corresponding control
value in indices.
"""
full_input, indices = args
'''
For example, given:
full_input: [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
nb_actions: 3
output_size: 2
indices: [[0], [2]]
desired output: [[1, 2], [11, 12]]
we want to output the first two elements (index 0) of the first sample
and the last two elements (index 2) of the second sample.
To do this, we need the absolute indices [[0, 1], [4, 5]].
To build these, first compute the base absolute indices (0 and 4) by
multiplying the control indices for the output size:
[[0], [2]] * 2 = [[0], [4]]
'''
base_absolute_indices = tf.multiply(indices, output_size)
'''
Build an array containing the base absolute indices repeated output_size
times:
[[0, 0], [4, 4]]
'''
bai_repeated = tf.tile(base_absolute_indices, [1, output_size])
'''
Finally, add range(output_size) to these tensors to get the full
absolute indices:
[0, 0] + [0, 1] = [0, 1]
[4, 4] + [0, 1] = [4, 5]
so we have:
[[0, 1], [4, 5]]
'''
absolute_indices = tf.add(bai_repeated, tf.range(output_size))
'''
Flatten this tensor in order to compute the one hot encoding for each
absolute index:
[0, 1, 4, 5]
'''
ai_flat = tf.reshape(absolute_indices, [-1])
'''
Compute the one-hot encoding for the absolute indices.
From [0, 1, 4, 5] we get:
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]
'''
ai_onehot = tf.one_hot(ai_flat, output_size * nb_actions)
'''
Build the mask for full_input from the one-hot-encoded absolute indices.
We need to group the one-hot absolute indices into groups of output_size
elements.
We get:
[
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]],
[[0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]
]
'''
group_shape = [-1, output_size, output_size * nb_actions]
group = tf.reshape(ai_onehot, group_shape)
'''
Reduce_sum along axis 1 to collapse the group and get the binary masks.
[[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1]]
'''
masks = tf.reduce_sum(group, axis=1)
'''
Convert the mask to boolean.
[[True, True, False, False, False, False],
[False, False, False, False, True, True]]
'''
zero = tf.constant(0, dtype=tf.float32)
bool_masks = tf.not_equal(masks, zero)
'''
Convert the boolean masks back to absolute indices for the full_input
tensor (each element represents [sample index, value index]).
We get:
[[0, 0], [0, 1], [1, 4], [1, 5]]
'''
ai_mask = tf.where(bool_masks)
'''
Apply the masks to full_input. We get a 1D tensor:
[1, 2, 11, 12]
'''
reduced_output = tf.gather_nd(full_input, ai_mask)
'''
Reshape the reduction to match the output shape.
We get:
[[1, 2], [11, 12]]
'''
return tf.reshape(reduced_output, [-1, output_size])
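# Minimal usage sketch (added for illustration), mirroring the class docstring;
# layer sizes and the random input below are arbitrary. Older Keras versions
# use input=/output= instead of inputs=/outputs= in the Model constructor.
if __name__ == "__main__":
    import numpy as np
    from keras.layers import Input, Dense
    from keras.models import Model
    inp = Input(shape=(3,))
    ctrl = Input(shape=(1,), dtype='int32')
    hidden = Dense(6)(inp)                      # output_dim=2, nb_ctrl_sig=3
    out = Multiplexer(2, 3)([hidden, ctrl])
    model = Model(inputs=[inp, ctrl], outputs=out)
    x = np.random.randn(1, 3)
    # control signal 1 selects the middle two neurons of the Dense layer
    print(model.predict([x, np.array([[1]], dtype='int32')]))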
| 36.215909 | 80 | 0.558833 | [
"MIT"
] | 2vin/multiplexed_cnn | multiplexer.py | 6,374 | Python |
from Methods import MetaTraderDataConverter, Ichimoku, Ichimoku_plot
import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_finance import candlestick_ohlc
import pandas as pd
import numpy as np
datafile="USDJPY_H1_2014_2018.csv"
df = MetaTraderDataConverter(datafile)
#df = Ichimoku(df)
start = df.index[0]
end = df.index[200]
df = df.iloc[200:400]
df = Ichimoku(df)
df = df.dropna()
print(df)
Ichimoku_plot(df) | 19.2 | 68 | 0.789583 | [
"MIT"
] | JoshChima/Ichimoku | The_Chart.py | 480 | Python |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.documents import urls as wagtaildocs_urls
urlpatterns = [
url(r'^django-admin/', admin.site.urls),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 30.5 | 80 | 0.760929 | [
"BSD-3-Clause"
] | divio/aldryn-wagtail | aldryn_wagtail/urls.py | 732 | Python |
#! /usr/bin/python
import sys,shutil, urllib2, json, time, subprocess, os, commands, signal, re
sys.path.insert(0, 'srch2lib')
import test_lib
port = '8087'
# This test case reads data from the json files,
# then reads all the access control data from json files too.
# It then runs some searches using a roleId in the query,
# and all the results should have this roleId in their access list.
# It reads the keywords and role ids from the queriesAndResults.txt file;
# the format of each line in this file is like:
# coreName keyword roleid || results
# example : core1 hello 103 || 12 14 18
#Function for checking the results
def checkResult(query, responseJson,resultValue):
# for key, value in responseJson:
# print key, value
isPass=1
if len(responseJson) == len(resultValue):
for i in range(0, len(resultValue)):
#print response_json['results'][i]['record']['id']
if (resultValue.count(responseJson[i]['record']['id']) != 1):
isPass=0
print query+' test failed'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
for i in range(0, len(responseJson)):
print str(responseJson[i]['record']['id']) + '||' + resultValue[i]
break
else:
isPass=0
print query+' test failed - differing response lengths'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
maxLen = max(len(responseJson),len(resultValue))
for i in range(0, maxLen):
if i >= len(resultValue):
print str(responseJson[i]['record']['id'])+'||'
elif i >= len(responseJson):
print ' '+'||'+resultValue[i]
else:
print responseJson[i]['record']['id']+'||'+resultValue[i]
if isPass == 1:
print query+' test pass'
return 0
return 1
#prepare the query based on the valid syntax
def prepareQuery(queryKeyword, roleId, fuzzy):
query = ''
################# prepare main query part
query = query + 'q='
# local parameters
# query = query + '%7BdefaultPrefixComplete=COMPLETE%7D'
# keywords section
if fuzzy:
keyword = queryKeyword + '~'
else:
keyword = queryKeyword
query=query+keyword+'&'
# print 'Query : ' + query
##################################
query = query + 'roleId=' + roleId
return query
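# Worked example (for illustration): prepareQuery('trust', '1003', False)
# returns 'q=trust&roleId=1003'; with fuzzy=True the keyword is suffixed with
# '~', giving 'q=trust~&roleId=1003'.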
def testMultipleCores(queriesAndResultsPath, binary_path):
#Start the engine server
args = [ binary_path, '--config-file=./access_control/conf-acl.xml' ]
if test_lib.confirmPortAvailable(port) == False:
print 'Port ' + str(port) + ' already in use - aborting'
return -1
print 'starting engine: ' + args[0] + ' ' + args[1]
serverHandle = test_lib.startServer(args)
test_lib.pingServer(port)
failCount = 0
print "Test core1 - access control"
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
value=line.split('||')
if(value[0] == 'S'):
queryValue=value[1].split(' ')
allResults=value[2].split('@')
for coreResult in allResults:
resultValue=coreResult.split()
#construct the query
query='http://localhost:' + port + '/' + queryValue[0] + '/search?'
query = query + prepareQuery(queryValue[1], queryValue[2], False)
#do the query
response = urllib2.urlopen(query).read()
response_json = json.loads(response)
#check the result
failCount += checkResult(query, response_json['results'], resultValue)
else:
# the line is command query (insert/delete/update/acl etc)
coreName = value[1]
command = value[2]
payload = value[3]
if coreName == "":
query='http://localhost:' + port + '/' + command
else:
query='http://localhost:' + port + '/' + coreName + '/' + command
print query
request = urllib2.Request(query, data=payload)
request.get_method = lambda: 'PUT'
opener = urllib2.build_opener(urllib2.HTTPHandler)
url = opener.open(request)
time.sleep(1)
time.sleep(5)
test_lib.killServer(serverHandle)
print '=============================='
return failCount
if __name__ == '__main__':
if(os.path.exists("./access-control/core1Data")):
shutil.rmtree("./access-control/core1Data")
if(os.path.exists("./access-control/core2Data")):
shutil.rmtree("./access-control/core2Data")
if(os.path.exists("./access-control/core3Data")):
shutil.rmtree("./access-control/core3Data")
if(os.path.exists("./access-control/core4Data")):
shutil.rmtree("./access-control/core4Data")
#Path of the query file
#each line like "core1 trust 1000||01c90b4effb2353742080000" ---- coreName query roleId||record_ids(results)
binary_path = sys.argv[1]
queriesAndResultsPath = sys.argv[2]
exitCode = testMultipleCores(queriesAndResultsPath, binary_path)
if(os.path.exists("./access-control/core1Data")):
shutil.rmtree("./access-control/core1Data")
if(os.path.exists("./access-control/core2Data")):
shutil.rmtree("./access-control/core2Data")
if(os.path.exists("./access-control/core3Data")):
shutil.rmtree("./access-control/core3Data")
if(os.path.exists("./access-control/core4Data")):
shutil.rmtree("./access-control/core4Data")
os._exit(exitCode)
| 36.5125 | 112 | 0.593632 | [
"BSD-3-Clause"
] | SRCH2/srch2-ngn | test/wrapper/system_tests/access_control/record-based-ACL.py | 5,842 | Python |
from .estimate import estimate
from .estimatediff import estimatediff
from .estimatewithcv import estimatewithcv
from .asymptoticconfidenceinterval import asymptoticconfidenceinterval
from .asymptoticconfidenceintervalwithcv import asymptoticconfidenceintervalwithcv
from .onlineasymci import Online
from .gradcheck import gradcheck, hesscheck
from .cressieread import CressieRead
from .euclidean import Euclidean
from .crminustwo import CrMinusTwo
from .cidifference import cidifference
| 40.666667 | 82 | 0.883197 | [
"Unlicense"
] | pmineiro/elfcb | MLE/MLE/__init__.py | 488 | Python |
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the license found in the LICENSE.txt file in the root directory
# of this source tree.
# =======
# Imports
# =======
import sympy
# ==========================
# Declare symbolic variables
# ==========================
n = sympy.symbols('n', integer=True, positive=True)
t = sympy.symbols('t', real=True, positive=True)
| 24.478261 | 79 | 0.650089 | [
"MIT"
] | ameli/Orthogonal-Functions | ortho/_orthogonal_functions/declarations.py | 563 | Python |
from abc import ABC, abstractmethod
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class mab_user(ABC):
def __init__(self, n_arms, lamb=1):
super(mab_user, self).__init__()
self.t = torch.tensor(1.0)
self.r = torch.zeros(n_arms)
self.n = torch.zeros(n_arms)
self.id = -1
self.returns = 0
self.lamb = lamb
@abstractmethod
def choose(self):
pass
@abstractmethod
def update(self, arm, reward):
pass
class perfect_user(mab_user):
    # users that always make the perfect decision -- can be paired with recEngines
    # in collaborative-filtering (CF) simulations
def __init__(self, n_arms):
super().__init__(n_arms)
def setup_learners(self, learners):
#this setup routine must be called before perfect_user can run
self.learners = learners
def choose(self):
l_max = [0]*len(self.learners)
for i,learner in enumerate(self.learners):
l_max[i] = torch.max(learner.U[self.id] @ learner.V.t())
return torch.argmax(torch.tensor(l_max))
def update(self, arm, reward):
pass
class ucb_user(mab_user):
def __init__(self, n_arms):
super().__init__(n_arms)
def _ranking(self):
return self.r + self.lamb*torch.sqrt(2*torch.log(self.t)/self.n)
def choose(self):
return torch.argmax(self._ranking())
def update(self, arm, reward):
self.r[arm] = self.r[arm]*(self.n[arm]) + reward
self.n[arm] += 1
self.r[arm] /= self.n[arm]
self.t += 1
self.returns += reward
class e_greedy_user(ucb_user):
def __init__(self, n_arms, eps_scaling=0.333, r_tol=1e-20, eps0=1.0):
super().__init__(n_arms)
self.eps_scaling = eps_scaling
self.eps = eps0
self.eps0 = eps0
self.n_arms = n_arms
self.r_tol = r_tol
def choose(self):
if random.random() > self.eps:
a = torch.argmax(self.r + self.r_tol*torch.randn(self.r.shape))
else:
a = random.randint(0,self.n_arms-1)
return a
def update(self, arm, reward):
super().update(arm, reward)
self.eps = self.eps0/(self.t**self.eps_scaling)
class sw_ucb_user(mab_user):
    # sliding-window UCB user: only recent rewards count, window grows as lamb*t^alpha
    def __init__(self, n_arms):
        super(sw_ucb_user, self).__init__(n_arms)
        self.n_arms = n_arms
        self.t = torch.tensor(1.0)
        self.tau = 0.0
        self.sw_r = []
        self.sw_arms = []
        self.n = torch.zeros(self.n_arms)
        self.r = torch.zeros(self.n_arms)
        self.alpha = 0.9
        self.lamb = 1
        self.id = -1
        self.returns = 0
    def _ranking(self):
        return self.r/self.n + self.lamb*torch.sqrt(
            (1+self.alpha)*torch.log(self.t)/self.n)
    def choose(self):
        return torch.argmax(self._ranking())
    def update(self, arm, reward):
        self.sw_arms.append(arm)
        self.sw_r.append(reward)
        self.r[arm] += reward
        self.returns += reward
        self.n[arm] += 1
        self.t += 1
        tau_prime = torch.min(torch.ceil(self.lamb*(self.t**self.alpha)), self.t)
        delta_tau = tau_prime - self.tau
        if delta_tau < 1.0:
            # window did not grow by a full step: evict the oldest observation
            old_arm = self.sw_arms.pop(0)
            self.r[old_arm] -= self.sw_r.pop(0)
            self.n[old_arm] -= 1
        self.tau = tau_prime
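# Minimal simulation sketch (added for illustration): exercise the UCB and
# epsilon-greedy users on a synthetic 3-armed Bernoulli bandit; the reward
# probabilities below are arbitrary.
if __name__ == "__main__":
    probs = [0.2, 0.5, 0.8]
    for user in (ucb_user(len(probs)), e_greedy_user(len(probs))):
        for _ in range(2000):
            arm = user.choose()
            reward = 1.0 if random.random() < probs[arm] else 0.0
            user.update(arm, reward)
        print(type(user).__name__, float(user.returns), user.n.tolist())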
| 27.266667 | 80 | 0.584352 | [
"Apache-2.0"
] | tginart/competing-ai | user.py | 3,272 | Python |
from flask import request, jsonify
from flask_restful import Resource, reqparse, abort
from flask_jwt import current_app
from app.auth.models import User
def generate_token(user):
""" Currently this is workaround
since the latest version that already has this function
is not published on PyPI yet and we don't want
to install the package directly from GitHub.
See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145
"""
jwt = current_app.extensions['jwt']
token = jwt.jwt_encode_callback(user)
return token
class SignUpResource(Resource):
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
def post(self):
args = self.parser.parse_args()
if not User.query.filter_by(email=args['email']).scalar():
User(
email = args['email'],
password = args['password']
).save()
            return {'message': 'Signed up successfully'}
abort(400, message='Email already exists.')
class LoginResource(Resource):
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
def post(self):
args = self.parser.parse_args()
user = User.query.filter_by(email=args['email']).first()
if user is not None and user.verify_password(args['password']):
token = generate_token(user)
return jsonify({'token': token.decode("utf-8")})
abort(400, message='Invalid credentials')
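def register_auth_resources(api):
    """ Hypothetical wiring helper (not part of the original app): attach the
    resources above to a flask_restful.Api instance; the URL paths are
    assumptions. """
    api.add_resource(SignUpResource, '/auth/signup')
    api.add_resource(LoginResource, '/auth/login')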
| 37.652174 | 122 | 0.681871 | [
"MIT"
] | Abhishek1373/Building-Serverless-Python-Web-Services-with-Zappa | Chapter04/app/auth/resources.py | 1,732 | Python |
import unittest
import cryptomon.common as common
class Testcryptomon(unittest.TestCase):
def setUp(self):
self.response = [
{
"id": "bitcoin",
"name": "Bitcoin",
"symbol": "BTC",
"rank": "1",
"price_usd": "15653.3",
"price_btc": "1.0",
"24h_volume_usd": "14446900000.0",
"market_cap_usd": "261915508097",
"available_supply": "16732287.0",
"total_supply": "16732287.0",
"max_supply": "21000000.0",
"percent_change_1h": "0.75",
"percent_change_24h": "4.73",
"percent_change_7d": "34.5",
"last_updated": "1512920953",
"price_eur": "13304.365802",
"24h_volume_eur": "12278998186.0",
"market_cap_eur": "222612466952"
},
{
"id": "ethereum",
"name": "Ethereum",
"symbol": "ETH",
"rank": "2",
"price_usd": "452.479",
"price_btc": "0.0288748",
"24h_volume_usd": "1736900000.0",
"market_cap_usd": "43552764899.0",
"available_supply": "96253671.0",
"total_supply": "96253671.0",
"percent_change_1h": "0.58",
"percent_change_24h": "-7.03",
"percent_change_7d": "-5.36",
"last_updated": "1512920957",
"price_eur": "384.58000126",
"24h_volume_eur": "1476260786.0",
"market_cap_eur": "37017236998.0"
}
]
def test_find_data(self):
filtered_data = common.find_data(self.response, ['BTC'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['btc'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['eth'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['ETH'])
self.assertEqual(len(filtered_data), 1)
filtered_data = common.find_data(self.response, ['LTC'])
self.assertEqual(len(filtered_data), 0)
filtered_data = common.find_data(self.response, ['BTC', 'eth'])
self.assertEqual(len(filtered_data), 2)
filtered_data = common.find_data(self.response, ['BTC', 'eth', 'ltc'])
self.assertEqual(len(filtered_data), 2)
def test_tabulate_data(self):
tabulated_data = common.process_data(self.response)
self.assertEqual(len(tabulated_data), 3)
# all items must have same number of fields
for item in tabulated_data:
self.assertEqual(len(item), len(tabulated_data[0]))
self.assertEqual(tabulated_data[0][0], common.fields_good_name["rank"])
self.assertEqual(tabulated_data[0][1], common.fields_good_name["symbol"])
self.assertEqual(tabulated_data[0][2], common.fields_good_name["price"])
self.assertEqual(tabulated_data[0][3], common.fields_good_name["percent_change_24h"])
self.assertEqual(tabulated_data[0][4], common.fields_good_name["percent_change_1h"])
self.assertEqual(tabulated_data[0][5], common.fields_good_name["market_cap"])
if __name__ == '__main__':
unittest.main(verbosity=2)
| 41.902439 | 93 | 0.559953 | [
"MIT"
] | S0L1DUS/cryptocoinmon | tests/test_cryptomon.py | 3,436 | Python |
class Options:
def __init__(self, output_formats):
self.output_formats = output_formats
def get_output_formats(self):
return self.output_formats
def __add__(self, other):
return Options(output_formats=self.output_formats + other.output_formats)
| 28.3 | 81 | 0.724382 | [
"MIT"
] | das-g/osmaxx-conversion-service | conversion_service/converters/converter.py | 283 | Python |
import logging
import random
import re
from urllib.parse import urljoin
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
logger = logging.getLogger(__name__)
IMAGE_FILE_REGEX = re.compile(r'([-\w]+\.(?:jpg|jpeg|gif|png))',
re.IGNORECASE)
def crawl_page(project_data, prerequisites):
""" Picks a random image off of the passed URL."""
result = {
'status': 'success',
'image': None
}
url = project_data.get('url')
if not url:
result['status'] = 'error'
result['error_message'] = 'URL was not provided.'
return result
# Crawl the website for images.
logger.info('Starting to crawl %s', url)
images = find_images(url)
num_images = len(images)
logger.info('Found %s images', num_images)
if num_images == 0:
result['status'] = 'error'
result['error_message'] = 'Unable to find images at the provided URL.'
return result
# Return a random one.
logger.info('Picking a random one...')
image = random.choice(list(images))
result['image'] = image
return result
def find_images(url):
""" Fetches a url's HTML and extracts all image sources in an <img> tag.
"""
images = set()
# Fetch the content.
headers = {
'User-Agent': ('Mozilla/5.0 (compatible; OrchestraBot/1.0; '
'[email protected])'),
}
response = requests.get(url, headers=headers)
if response.status_code < 200 or response.status_code >= 300:
logger.error("Couldn't fetch url {}".format(url))
return images
content = response.text
# Find images in the content.
soup = BeautifulSoup(content)
tags = soup.find_all('img', src=IMAGE_FILE_REGEX)
for tag in tags:
link = tag.get('src')
if link is None:
continue
if not bool(urlparse(link).netloc):
link = urljoin(url, link)
images.add(link)
return images
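# Usage sketch (added for illustration; example.com is a placeholder URL):
# crawl_page receives the step's project_data dict and ignores prerequisites.
def _example_crawl_step():
    result = crawl_page({'url': 'https://example.com'}, prerequisites=None)
    return result['status'], result['image']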
| 27.162162 | 78 | 0.612438 | [
"Apache-2.0"
] | b12io/orchestra | simple_workflow/v1/crawl.py | 2,010 | Python |
import math
import random
from typing import Dict, Iterable, Sequence, Tuple
from eth.constants import ZERO_HASH32
from eth_typing import BLSPubkey, BLSSignature, Hash32
from eth_utils import to_tuple
from eth_utils.toolz import keymap as keymapper
from eth_utils.toolz import pipe
from eth2._utils.bitfield import get_empty_bitfield, set_voted
from eth2._utils.bls import Domain, bls
from eth2.beacon.committee_helpers import (
get_committee_count,
get_crosslink_committee,
get_shard_delta,
get_start_shard,
)
from eth2.beacon.helpers import (
compute_domain,
compute_epoch_of_slot,
compute_start_slot_of_epoch,
get_active_validator_indices,
get_block_root,
get_block_root_at_slot,
get_domain,
)
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.state_machines.base import BaseBeaconStateMachine
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.attestation_data_and_custody_bits import (
AttestationDataAndCustodyBit,
)
from eth2.beacon.types.attestations import Attestation, IndexedAttestation
from eth2.beacon.types.attester_slashings import AttesterSlashing
from eth2.beacon.types.blocks import BeaconBlockHeader
from eth2.beacon.types.checkpoints import Checkpoint
from eth2.beacon.types.crosslinks import Crosslink
from eth2.beacon.types.deposit_data import DepositData
from eth2.beacon.types.pending_attestations import PendingAttestation
from eth2.beacon.types.proposer_slashings import ProposerSlashing
from eth2.beacon.types.states import BeaconState
from eth2.beacon.types.voluntary_exits import VoluntaryExit
from eth2.beacon.typing import (
Bitfield,
CommitteeIndex,
Epoch,
Gwei,
Shard,
Slot,
ValidatorIndex,
default_bitfield,
default_epoch,
default_shard,
)
from eth2.configs import CommitteeConfig, Eth2Config
# TODO(ralexstokes) merge w/ below
def _mk_pending_attestation(
bitfield: Bitfield = default_bitfield,
target_root: Hash32 = ZERO_HASH32,
target_epoch: Epoch = default_epoch,
shard: Shard = default_shard,
start_epoch: Epoch = default_epoch,
parent_root: Hash32 = ZERO_HASH32,
data_root: Hash32 = ZERO_HASH32,
) -> PendingAttestation:
return PendingAttestation(
aggregation_bits=bitfield,
data=AttestationData(
target=Checkpoint(epoch=target_epoch, root=target_root),
crosslink=Crosslink(
shard=shard,
parent_root=parent_root,
start_epoch=start_epoch,
end_epoch=target_epoch,
data_root=data_root,
),
),
)
def mk_pending_attestation_from_committee(
parent: Crosslink,
committee_size: int,
shard: Shard,
target_epoch: Epoch = default_epoch,
target_root: Hash32 = ZERO_HASH32,
data_root: Hash32 = ZERO_HASH32,
) -> PendingAttestation:
bitfield = get_empty_bitfield(committee_size)
for i in range(committee_size):
bitfield = set_voted(bitfield, i)
return _mk_pending_attestation(
bitfield=bitfield,
target_root=target_root,
target_epoch=target_epoch,
shard=shard,
start_epoch=parent.end_epoch,
parent_root=parent.hash_tree_root,
data_root=data_root,
)
def _mk_some_pending_attestations_with_some_participation_in_epoch(
state: BeaconState,
epoch: Epoch,
config: Eth2Config,
participation_ratio: float,
number_of_shards_to_check: int,
) -> Iterable[PendingAttestation]:
block_root = get_block_root(
state, epoch, config.SLOTS_PER_EPOCH, config.SLOTS_PER_HISTORICAL_ROOT
)
epoch_start_shard = get_start_shard(state, epoch, CommitteeConfig(config))
if epoch == state.current_epoch(config.SLOTS_PER_EPOCH):
parent_crosslinks = state.current_crosslinks
else:
parent_crosslinks = state.previous_crosslinks
for shard in range(
epoch_start_shard, epoch_start_shard + number_of_shards_to_check
):
shard = Shard(shard % config.SHARD_COUNT)
crosslink_committee = get_crosslink_committee(
state, epoch, shard, CommitteeConfig(config)
)
if not crosslink_committee:
continue
participants_count = math.ceil(participation_ratio * len(crosslink_committee))
if not participants_count:
return tuple()
yield mk_pending_attestation_from_committee(
parent_crosslinks[shard],
participants_count,
shard,
target_epoch=epoch,
target_root=block_root,
)
def mk_all_pending_attestations_with_some_participation_in_epoch(
state: BeaconState, epoch: Epoch, config: Eth2Config, participation_ratio: float
) -> Iterable[PendingAttestation]:
return _mk_some_pending_attestations_with_some_participation_in_epoch(
state,
epoch,
config,
participation_ratio,
get_shard_delta(state, epoch, CommitteeConfig(config)),
)
@to_tuple
def mk_all_pending_attestations_with_full_participation_in_epoch(
state: BeaconState, epoch: Epoch, config: Eth2Config
) -> Iterable[PendingAttestation]:
return mk_all_pending_attestations_with_some_participation_in_epoch(
state, epoch, config, 1.0
)
#
# Aggregation
#
def verify_votes(
message_hash: Hash32,
votes: Iterable[Tuple[ValidatorIndex, BLSSignature, BLSPubkey]],
domain: Domain,
) -> Tuple[Tuple[BLSSignature, ...], Tuple[ValidatorIndex, ...]]:
"""
Verify the given votes.
"""
sigs_with_committee_info = tuple(
(sig, committee_index)
for (committee_index, sig, pubkey) in votes
if bls.verify(
message_hash=message_hash, pubkey=pubkey, signature=sig, domain=domain
)
)
try:
sigs, committee_indices = zip(*sigs_with_committee_info)
except ValueError:
sigs = tuple()
committee_indices = tuple()
return sigs, committee_indices
def aggregate_votes(
bitfield: Bitfield,
sigs: Sequence[BLSSignature],
voting_sigs: Sequence[BLSSignature],
attesting_indices: Sequence[CommitteeIndex],
) -> Tuple[Bitfield, BLSSignature]:
"""
Aggregate the votes.
"""
# Update the bitfield and append the signatures
sigs = tuple(sigs) + tuple(voting_sigs)
bitfield = pipe(
bitfield,
*(set_voted(index=committee_index) for committee_index in attesting_indices)
)
return bitfield, bls.aggregate_signatures(sigs)
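# Minimal sketch (added for illustration): how verify_votes and aggregate_votes
# compose. The parallel privkeys/pubkeys sequences are assumed to be matching
# BLS key pairs supplied by the caller; this helper is not used elsewhere.
def _example_aggregate_committee_votes(
    message_hash: Hash32,
    domain: Domain,
    privkeys: Sequence[int],
    pubkeys: Sequence[BLSPubkey],
) -> Tuple[Bitfield, BLSSignature]:
    votes = tuple(
        (
            ValidatorIndex(i),
            bls.sign(message_hash=message_hash, privkey=privkey, domain=domain),
            pubkey,
        )
        for i, (privkey, pubkey) in enumerate(zip(privkeys, pubkeys))
    )
    sigs, attesting_indices = verify_votes(message_hash, votes, domain)
    return aggregate_votes(
        bitfield=get_empty_bitfield(len(pubkeys)),
        sigs=(),
        voting_sigs=sigs,
        attesting_indices=attesting_indices,
    )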
#
# Signer
#
def sign_proof_of_possession(deposit_data: DepositData, privkey: int) -> BLSSignature:
return bls.sign(
message_hash=deposit_data.signing_root,
privkey=privkey,
domain=compute_domain(SignatureDomain.DOMAIN_DEPOSIT),
)
def sign_transaction(
*,
message_hash: Hash32,
privkey: int,
state: BeaconState,
slot: Slot,
signature_domain: SignatureDomain,
slots_per_epoch: int
) -> BLSSignature:
domain = get_domain(
state,
signature_domain,
slots_per_epoch,
message_epoch=compute_epoch_of_slot(slot, slots_per_epoch),
)
return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain)
SAMPLE_HASH_1 = Hash32(b"\x11" * 32)
SAMPLE_HASH_2 = Hash32(b"\x22" * 32)
def create_block_header_with_signature(
state: BeaconState,
body_root: Hash32,
privkey: int,
slots_per_epoch: int,
parent_root: Hash32 = SAMPLE_HASH_1,
state_root: Hash32 = SAMPLE_HASH_2,
) -> BeaconBlockHeader:
block_header = BeaconBlockHeader(
slot=state.slot,
parent_root=parent_root,
state_root=state_root,
body_root=body_root,
)
block_header_signature = sign_transaction(
message_hash=block_header.signing_root,
privkey=privkey,
state=state,
slot=block_header.slot,
signature_domain=SignatureDomain.DOMAIN_BEACON_PROPOSER,
slots_per_epoch=slots_per_epoch,
)
return block_header.copy(signature=block_header_signature)
#
#
# Only for test/simulation
#
#
#
# ProposerSlashing
#
def create_mock_proposer_slashing_at_block(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
block_root_1: Hash32,
block_root_2: Hash32,
proposer_index: ValidatorIndex,
) -> ProposerSlashing:
"""
Return a `ProposerSlashing` derived from the given block roots.
If the header roots do not match, the `ProposerSlashing` is valid.
If the header roots do match, the `ProposerSlashing` is not valid.
"""
slots_per_epoch = config.SLOTS_PER_EPOCH
block_header_1 = create_block_header_with_signature(
state,
block_root_1,
keymap[state.validators[proposer_index].pubkey],
slots_per_epoch,
)
block_header_2 = create_block_header_with_signature(
state,
block_root_2,
keymap[state.validators[proposer_index].pubkey],
slots_per_epoch,
)
return ProposerSlashing(
proposer_index=proposer_index, header_1=block_header_1, header_2=block_header_2
)
#
# AttesterSlashing
#
def create_mock_slashable_attestation(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_slot: Slot,
) -> IndexedAttestation:
"""
Create an `IndexedAttestation` that is signed by one attester.
"""
attester_index = ValidatorIndex(0)
committee = (attester_index,)
shard = Shard(0)
# Use genesis block root as `beacon_block_root`, only for tests.
beacon_block_root = get_block_root_at_slot(
state, attestation_slot, config.SLOTS_PER_HISTORICAL_ROOT
)
# Get `target_root`
target_root = _get_target_root(state, config, beacon_block_root)
# Get `source_root`
source_root = get_block_root_at_slot(
state,
compute_start_slot_of_epoch(
state.current_justified_checkpoint.epoch, config.SLOTS_PER_EPOCH
),
config.SLOTS_PER_HISTORICAL_ROOT,
)
previous_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch, root=source_root
),
target=Checkpoint(
epoch=compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH),
root=target_root,
),
crosslink=previous_crosslink,
)
message_hash, attesting_indices = _get_mock_message_and_attesting_indices(
attestation_data, committee, num_voted_attesters=1
)
signature = sign_transaction(
message_hash=message_hash,
privkey=keymap[state.validators[attesting_indices[0]].pubkey],
state=state,
slot=attestation_slot,
signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
slots_per_epoch=config.SLOTS_PER_EPOCH,
)
validator_indices = tuple(committee[i] for i in attesting_indices)
return IndexedAttestation(
custody_bit_0_indices=validator_indices,
custody_bit_1_indices=tuple(),
data=attestation_data,
signature=signature,
)
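# Note (added for clarity): the single attester ends up in
# ``custody_bit_0_indices`` while ``custody_bit_1_indices`` stays empty,
# matching the ``custody_bit=False`` message signed above.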
def create_mock_attester_slashing_is_double_vote(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_epoch: Epoch,
) -> AttesterSlashing:
attestation_slot_1 = compute_start_slot_of_epoch(
attestation_epoch, config.SLOTS_PER_EPOCH
)
attestation_slot_2 = Slot(attestation_slot_1 + 1)
slashable_attestation_1 = create_mock_slashable_attestation(
state, config, keymap, attestation_slot_1
)
slashable_attestation_2 = create_mock_slashable_attestation(
state, config, keymap, attestation_slot_2
)
return AttesterSlashing(
attestation_1=slashable_attestation_1, attestation_2=slashable_attestation_2
)
def create_mock_attester_slashing_is_surround_vote(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
attestation_epoch: Epoch,
) -> AttesterSlashing:
# target_epoch_2 < target_epoch_1
attestation_slot_2 = compute_start_slot_of_epoch(
attestation_epoch, config.SLOTS_PER_EPOCH
)
attestation_slot_1 = Slot(attestation_slot_2 + config.SLOTS_PER_EPOCH)
slashable_attestation_1 = create_mock_slashable_attestation(
state.copy(
slot=attestation_slot_1, current_justified_epoch=config.GENESIS_EPOCH
),
config,
keymap,
attestation_slot_1,
)
slashable_attestation_2 = create_mock_slashable_attestation(
state.copy(
slot=attestation_slot_1,
current_justified_epoch=config.GENESIS_EPOCH
+ 1, # source_epoch_1 < source_epoch_2
),
config,
keymap,
attestation_slot_2,
)
return AttesterSlashing(
attestation_1=slashable_attestation_1, attestation_2=slashable_attestation_2
)
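# Worked example (added, with assumed numbers): with SLOTS_PER_EPOCH = 8,
# GENESIS_EPOCH = 0 and attestation_epoch = 4, attestation_2 targets epoch 4
# with source epoch 1, while attestation_1 targets epoch 5 with source
# epoch 0 -- so attestation_1 surrounds attestation_2
# (source_1 < source_2 < target_2 < target_1, i.e. 0 < 1 < 4 < 5).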
#
# Attestation
#
def _get_target_root(
state: BeaconState, config: Eth2Config, beacon_block_root: Hash32
) -> Hash32:
epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH)
epoch_start_slot = compute_start_slot_of_epoch(epoch, config.SLOTS_PER_EPOCH)
if epoch_start_slot == state.slot:
return beacon_block_root
else:
return get_block_root(
state, epoch, config.SLOTS_PER_EPOCH, config.SLOTS_PER_HISTORICAL_ROOT
)
def _get_mock_message_and_attesting_indices(
attestation_data: AttestationData,
committee: Sequence[ValidatorIndex],
num_voted_attesters: int,
) -> Tuple[Hash32, Tuple[CommitteeIndex, ...]]:
"""
Get ``message_hash`` and voting indices of the given ``committee``.
"""
message_hash = AttestationDataAndCustodyBit(
data=attestation_data, custody_bit=False
).hash_tree_root
committee_size = len(committee)
assert num_voted_attesters <= committee_size
attesting_indices = tuple(
CommitteeIndex(i)
for i in random.sample(range(committee_size), num_voted_attesters)
)
return message_hash, tuple(sorted(attesting_indices))
def _create_mock_signed_attestation(
state: BeaconState,
attestation_data: AttestationData,
attestation_slot: Slot,
committee: Sequence[ValidatorIndex],
num_voted_attesters: int,
keymap: Dict[BLSPubkey, int],
slots_per_epoch: int,
) -> Attestation:
"""
    Create a mock attestation for the given ``attestation_data``, signed by ``num_voted_attesters`` members of ``committee`` using keys from ``keymap``.
"""
message_hash, attesting_indices = _get_mock_message_and_attesting_indices(
attestation_data, committee, num_voted_attesters
)
# Use privkeys to sign the attestation
signatures = [
sign_transaction(
message_hash=message_hash,
privkey=keymap[state.validators[committee[committee_index]].pubkey],
state=state,
slot=attestation_slot,
signature_domain=SignatureDomain.DOMAIN_ATTESTATION,
slots_per_epoch=slots_per_epoch,
)
for committee_index in attesting_indices
]
# aggregate signatures and construct participant bitfield
aggregation_bits, aggregate_signature = aggregate_votes(
bitfield=get_empty_bitfield(len(committee)),
sigs=(),
voting_sigs=signatures,
attesting_indices=attesting_indices,
)
    # create attestation from attestation_data, participant bitfield, and signature
return Attestation(
aggregation_bits=aggregation_bits,
data=attestation_data,
custody_bits=Bitfield((False,) * len(aggregation_bits)),
signature=aggregate_signature,
)
# TODO(ralexstokes) merge in w/ ``get_committee_assignment``
def get_crosslink_committees_at_slot(
state: BeaconState, slot: Slot, config: Eth2Config
) -> Tuple[Tuple[Tuple[ValidatorIndex, ...], Shard], ...]:
epoch = compute_epoch_of_slot(slot, config.SLOTS_PER_EPOCH)
active_validators = get_active_validator_indices(state.validators, epoch)
committees_per_slot = (
get_committee_count(
len(active_validators),
config.SHARD_COUNT,
config.SLOTS_PER_EPOCH,
config.TARGET_COMMITTEE_SIZE,
)
// config.SLOTS_PER_EPOCH
)
results = []
offset = committees_per_slot * (slot % config.SLOTS_PER_EPOCH)
slot_start_shard = Shard(
(get_start_shard(state, epoch, CommitteeConfig(config)) + offset)
% config.SHARD_COUNT
)
for i in range(committees_per_slot):
shard = (slot_start_shard + i) % config.SHARD_COUNT
committee = get_crosslink_committee(
state, epoch, shard, CommitteeConfig(config)
)
results.append((committee, Shard(shard)))
return tuple(results)
def create_signed_attestation_at_slot(
state: BeaconState,
config: Eth2Config,
state_machine: BaseBeaconStateMachine,
attestation_slot: Slot,
beacon_block_root: Hash32,
validator_privkeys: Dict[ValidatorIndex, int],
committee: Tuple[ValidatorIndex, ...],
shard: Shard,
) -> Attestation:
"""
    Create an attestation for the given ``attestation_slot``, signed by the full ``committee`` using ``validator_privkeys``.
"""
state_transition = state_machine.state_transition
state = state_transition.apply_state_transition(state, future_slot=attestation_slot)
target_epoch = compute_epoch_of_slot(attestation_slot, config.SLOTS_PER_EPOCH)
target_root = _get_target_root(state, config, beacon_block_root)
parent_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch,
root=state.current_justified_checkpoint.root,
),
target=Checkpoint(root=target_root, epoch=target_epoch),
crosslink=Crosslink(
shard=shard,
parent_root=parent_crosslink.hash_tree_root,
start_epoch=parent_crosslink.end_epoch,
end_epoch=target_epoch,
),
)
return _create_mock_signed_attestation(
state,
attestation_data,
attestation_slot,
committee,
len(committee),
keymapper(lambda index: state.validators[index].pubkey, validator_privkeys),
config.SLOTS_PER_EPOCH,
)
@to_tuple
def create_mock_signed_attestations_at_slot(
state: BeaconState,
config: Eth2Config,
state_machine: BaseBeaconStateMachine,
attestation_slot: Slot,
beacon_block_root: Hash32,
keymap: Dict[BLSPubkey, int],
voted_attesters_ratio: float = 1.0,
) -> Iterable[Attestation]:
"""
    Create mock attestations for the given ``attestation_slot``, one per crosslink committee, signed with keys from ``keymap``.
"""
crosslink_committees_at_slot = get_crosslink_committees_at_slot(
state, attestation_slot, config
)
# Get `target_root`
target_root = _get_target_root(state, config, beacon_block_root)
target_epoch = compute_epoch_of_slot(state.slot, config.SLOTS_PER_EPOCH)
for crosslink_committee in crosslink_committees_at_slot:
committee, shard = crosslink_committee
parent_crosslink = state.current_crosslinks[shard]
attestation_data = AttestationData(
beacon_block_root=beacon_block_root,
source=Checkpoint(
epoch=state.current_justified_checkpoint.epoch,
root=state.current_justified_checkpoint.root,
),
target=Checkpoint(root=target_root, epoch=target_epoch),
crosslink=Crosslink(
shard=shard,
parent_root=parent_crosslink.hash_tree_root,
start_epoch=parent_crosslink.end_epoch,
end_epoch=min(
target_epoch,
parent_crosslink.end_epoch + config.MAX_EPOCHS_PER_CROSSLINK,
),
),
)
num_voted_attesters = int(len(committee) * voted_attesters_ratio)
yield _create_mock_signed_attestation(
state,
attestation_data,
attestation_slot,
committee,
num_voted_attesters,
keymap,
config.SLOTS_PER_EPOCH,
)
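# Hedged usage sketch (added; argument values are illustrative):
#
#   attestations = create_mock_signed_attestations_at_slot(
#       state, config, state_machine, attestation_slot,
#       beacon_block_root, keymap, voted_attesters_ratio=1.0,
#   )
#
# returns one mock ``Attestation`` per crosslink committee at that slot.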
#
# VoluntaryExit
#
def create_mock_voluntary_exit(
state: BeaconState,
config: Eth2Config,
keymap: Dict[BLSPubkey, int],
validator_index: ValidatorIndex,
exit_epoch: Epoch = None,
) -> VoluntaryExit:
current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
target_epoch = current_epoch if exit_epoch is None else exit_epoch
voluntary_exit = VoluntaryExit(epoch=target_epoch, validator_index=validator_index)
return voluntary_exit.copy(
signature=sign_transaction(
message_hash=voluntary_exit.signing_root,
privkey=keymap[state.validators[validator_index].pubkey],
state=state,
slot=compute_start_slot_of_epoch(target_epoch, config.SLOTS_PER_EPOCH),
signature_domain=SignatureDomain.DOMAIN_VOLUNTARY_EXIT,
slots_per_epoch=config.SLOTS_PER_EPOCH,
)
)
#
# Deposit
#
def create_mock_deposit_data(
*,
config: Eth2Config,
pubkey: BLSPubkey,
privkey: int,
withdrawal_credentials: Hash32,
amount: Gwei = None
) -> DepositData:
if amount is None:
amount = config.MAX_EFFECTIVE_BALANCE
data = DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount
)
signature = sign_proof_of_possession(deposit_data=data, privkey=privkey)
return data.copy(signature=signature)
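# Hedged usage sketch (added; not part of the original module):
#
#   deposit_data = create_mock_deposit_data(
#       config=config,
#       pubkey=pubkey,
#       privkey=privkey,
#       withdrawal_credentials=Hash32(b"\x00" * 32),
#   )
#
# uses ``MAX_EFFECTIVE_BALANCE`` as the amount when none is supplied.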
| 30.64598 | 95 | 0.704022 | [
"MIT"
] | AndrewBezold/trinity | eth2/beacon/tools/builder/validator.py | 21,728 | Python |
import base64
import deployment_options
import os
import tempfile
import utils
def render_file(namespace, private_key, public_key):
src_file = os.path.join(os.getcwd(), 'deploy/assisted-installer-local-auth.yaml')
dst_file = os.path.join(os.getcwd(), 'build', namespace, 'assisted-installer-local-auth.yaml')
with open(src_file, "r") as src:
with open(dst_file, "w+") as dst:
data = src.read()
data = data.replace('REPLACE_NAMESPACE', f'"{namespace}"')
data = data.replace('REPLACE_PRIVATE_KEY', f'"{private_key}"')
data = data.replace('REPLACE_PUBLIC_KEY', f'"{public_key}"')
print("Deploying {}".format(dst_file))
dst.write(data)
return dst_file
def encoded_contents(filename):
with open(filename, 'r') as f:
return base64.b64encode(bytearray(f.read(), 'utf-8')).decode('utf-8')
def main():
deploy_options = deployment_options.load_deployment_options()
utils.verify_build_directory(deploy_options.namespace)
    # Render the file with empty key values for the operator, since we don't want every deployment to share the same keys
if not deploy_options.apply_manifest:
render_file(deploy_options.namespace, "", "")
return
secret_name = 'assisted-installer-local-auth-key'
exists = utils.check_if_exists(
"secret",
secret_name,
target=deploy_options.target,
namespace=deploy_options.namespace,
profile=deploy_options.profile
)
if exists:
print(f'Secret {secret_name} already exists in namespace {deploy_options.namespace}')
return
output_dir = tempfile.TemporaryDirectory()
    priv_path = os.path.join(output_dir.name, 'ec-private-key.pem')
    pub_path = os.path.join(output_dir.name, 'ec-public-key.pem')
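    # The two commands below (standard OpenSSL usage) generate an ECDSA
    # P-256 ("prime256v1") private key and then extract the matching public
    # key, both written as PEM files into the temporary directory.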
print(utils.check_output(f'openssl ecparam -name prime256v1 -genkey -noout -out {priv_path}'))
print(utils.check_output(f'openssl ec -in {priv_path} -pubout -out {pub_path}'))
secret_file = render_file(deploy_options.namespace, encoded_contents(priv_path), encoded_contents(pub_path))
utils.apply(
target=deploy_options.target,
namespace=deploy_options.namespace,
profile=deploy_options.profile,
file=secret_file
)
if __name__ == "__main__":
main()
| 34.761194 | 112 | 0.685702 | [
"Apache-2.0"
] | lack/assisted-service | tools/deploy_local_auth_secret.py | 2,329 | Python |
# -*- coding: utf-8 -*-
data = ''
with open('input.txt') as f:
data = f.read().strip()
def Reacts(a, b):
if a == b:
return False
if a.lower() == b or b.lower() == a:
return True
return False
def Collapse(polymer):
i = 1
while i < len(polymer):
        if Reacts(polymer[i - 1], polymer[i]):
            # Remove the reacting pair and step back so the new neighbours
            # get compared on the next pass (clamped so the index cannot
            # wrap around to the end of the list).
            del polymer[i - 1]
            del polymer[i - 1]
            i = max(i - 2, 0)
        i += 1
return polymer
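# Worked example: the sample polymer from the puzzle statement,
# "dabAcCaCBAcCcaDA", collapses to "dabCBAcaDA" (10 units remaining).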
#data = 'bbbbAaccc'
polymer = list(data)
p_c = Collapse(polymer)
print(len(p_c))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
len_min = len(polymer)
for l in alphabet:
filtered_data = data.replace(l, '').replace(l.upper(), '')
polymer = list(filtered_data)
p_c = Collapse(polymer)
print(l, len(p_c))
if len(p_c) < len_min:
len_min = len(p_c)
print(len_min) | 19.804348 | 63 | 0.514819 | [
"Apache-2.0"
] | uberales/aoc2018 | 05/aoc05.py | 911 | Python |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.StrOpt('instances_path_share',
default="",
help='The name of a Windows share name mapped to the '
'"instances_path" dir and used by the resize feature '
'to copy files to the target host. If left blank, an '
'administrative share will be used, looking for the same '
'"instances_path" used locally'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')
ERROR_INVALID_NAME = 123
class PathUtils(object):
def __init__(self):
self._smb_conn = wmi.WMI(moniker=r"root\Microsoft\Windows\SMB")
def open(self, path, mode):
"""Wrapper on __builtin__.open used to simplify unit testing."""
import __builtin__
return __builtin__.open(path, mode)
def exists(self, path):
return os.path.exists(path)
def makedirs(self, path):
os.makedirs(path)
def remove(self, path):
os.remove(path)
def rename(self, src, dest):
os.rename(src, dest)
def copyfile(self, src, dest):
self.copy(src, dest)
def copy(self, src, dest):
# With large files this is 2x-3x faster than shutil.copy(src, dest),
# especially when copying to a UNC target.
# shutil.copyfileobj(...) with a proper buffer is better than
# shutil.copy(...) but still 20% slower than a shell copy.
# It can be replaced with Win32 API calls to avoid the process
# spawning overhead.
output, ret = utils.execute('cmd.exe', '/C', 'copy', '/Y', src, dest)
if ret:
raise IOError(_('The file copy from %(src)s to %(dest)s failed')
% {'src': src, 'dest': dest})
def rmtree(self, path):
shutil.rmtree(path)
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server:
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return ('\\\\%(remote_server)s\\%(path)s' %
{'remote_server': remote_server, 'path': path})
else:
return local_instance_path
def _check_create_dir(self, path):
if not self.exists(path):
LOG.debug('Creating directory: %s', path)
self.makedirs(path)
def _check_remove_dir(self, path):
if self.exists(path):
LOG.debug('Removing directory: %s', path)
self.rmtree(path)
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
try:
if remove_dir:
self._check_remove_dir(path)
if create_dir:
self._check_create_dir(path)
return path
except WindowsError as ex:
if ex.winerror == ERROR_INVALID_NAME:
raise vmutils.HyperVException(_(
"Cannot access \"%(instances_path)s\", make sure the "
"path exists and that you have the proper permissions. "
"In particular Nova-Compute must not be executed with the "
"builtin SYSTEM account or other accounts unable to "
"authenticate on a remote host.") %
{'instances_path': instances_path})
raise
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def _lookup_vhd_path(self, instance_name, vhd_path_func):
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = vhd_path_func(instance_name, format_ext)
if self.exists(test_path):
vhd_path = test_path
break
return vhd_path
def lookup_root_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)
def lookup_configdrive_path(self, instance_name):
configdrive_path = None
for format_ext in constants.DISK_FORMAT_MAP:
test_path = self.get_configdrive_path(instance_name, format_ext)
if self.exists(test_path):
configdrive_path = test_path
break
return configdrive_path
def lookup_ephemeral_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path)
def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext.lower())
def get_configdrive_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'configdrive.' + format_ext.lower())
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
def get_vm_console_log_paths(self, vm_name, remote_server=None):
instance_dir = self.get_instance_dir(vm_name,
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
def check_smb_mapping(self, smbfs_share):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
return False
if os.path.exists(smbfs_share):
LOG.debug('Share already mounted: %s', smbfs_share)
return True
else:
LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
self.unmount_smb_share(smbfs_share, force=True)
return False
def mount_smb_share(self, smbfs_share, username=None, password=None):
try:
LOG.debug('Mounting share: %s', smbfs_share)
self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
UserName=username,
Password=password)
except wmi.x_wmi as exc:
            err_msg = (_(
                'Unable to mount SMBFS share: %(smbfs_share)s '
                'WMI exception: %(wmi_exc)s') % {'smbfs_share': smbfs_share,
                                                 'wmi_exc': exc})
raise vmutils.HyperVException(err_msg)
def unmount_smb_share(self, smbfs_share, force=False):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
LOG.debug('Share %s is not mounted. Skipping unmount.',
smbfs_share)
for mapping in mappings:
# Due to a bug in the WMI module, getting the output of
# methods returning None will raise an AttributeError
try:
mapping.Remove(Force=force)
except AttributeError:
pass
except wmi.x_wmi:
# If this fails, a 'Generic Failure' exception is raised.
# This happens even if we unforcefully unmount an in-use
# share, for which reason we'll simply ignore it in this
# case.
if force:
                    raise vmutils.HyperVException(
                        _("Could not unmount share: %s") % smbfs_share)
| 39.295082 | 79 | 0.609929 | [
"Apache-2.0"
] | bopopescu/nested_quota_final | nova/virt/hyperv/pathutils.py | 9,588 | Python |
"""
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from djcutter.users.forms import UserCreationForm
from djcutter.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
"""
Test class for all tests related to the UserCreationForm
"""
def test_username_validation_error_msg(self, user: User):
"""
        Tests that the UserCreationForm's unique-username validation works correctly by checking that:
        1) A new user with an existing username cannot be added.
        2) Only one error is raised by the UserCreationForm.
        3) The expected error message is raised.
"""
# The user already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| 29.125 | 87 | 0.629185 | [
"MIT"
] | macbotxxx/djcutter | djcutter/users/tests/test_forms.py | 1,165 | Python |
#!/usr/bin/env python3
import depthai as dai
import subprocess as sp
from os import name as osName
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and output
camRgb = pipeline.createColorCamera()
videoEnc = pipeline.createVideoEncoder()
xout = pipeline.createXLinkOut()
xout.setStreamName("h264")
# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
videoEnc.setDefaultProfilePreset(camRgb.getVideoSize(), camRgb.getFps(), dai.VideoEncoderProperties.Profile.H264_MAIN)
# Linking
camRgb.video.link(videoEnc.input)
videoEnc.bitstream.link(xout.input)
width, height = 720, 500
command = [
"ffplay",
"-i", "-",
"-x", str(width),
"-y", str(height),
"-framerate", "60",
"-fflags", "nobuffer",
"-flags", "low_delay",
"-framedrop",
"-strict", "experimental"
]
if osName == "nt": # Running on Windows
command = ["cmd", "/c"] + command
try:
proc = sp.Popen(command, stdin=sp.PIPE) # Start the ffplay process
except:
exit("Error: cannot run ffplay!\nTry running: sudo apt install ffmpeg")
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
# Output queue will be used to get the encoded data from the output defined above
q = device.getOutputQueue(name="h264", maxSize=30, blocking=True)
try:
while True:
data = q.get().getData() # Blocking call, will wait until new data has arrived
proc.stdin.write(data)
except:
pass
proc.stdin.close()
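    # A possible variation (not part of this example): the encoded stream
    # could instead be appended to a file (e.g. video.h264) and muxed
    # afterwards, e.g. `ffmpeg -i video.h264 -c copy video.mp4`.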
| 26.183333 | 118 | 0.694462 | [
"MIT"
] | AsherVo/depthai-experiments | gen2-play-encoded-stream/main.py | 1,571 | Python |
# https://github.com/TrustyJAID/Trusty-cogs/blob/master/notsobot/converter.py
import re
from discord.ext.commands.converter import Converter
from discord.ext.commands.errors import BadArgument
from redbot.core.i18n import Translator
_ = Translator("ReverseImageSearch", __file__)
IMAGE_LINKS = re.compile(
r"(https?://[^\"\'\s]*\.(?:png|jpg|jpeg|gif|svg)(\?size=[0-9]*)?)", flags=re.I
)
EMOJI_REGEX = re.compile(r"(<(a)?:[a-zA-Z0-9_]+:([0-9]+)>)")
MENTION_REGEX = re.compile(r"<@!?([0-9]+)>")
ID_REGEX = re.compile(r"[0-9]{17,}")
class ImageFinder(Converter):
"""
    Converter that generalizes NotSoBot's image-searching behavior into a
    reusable command-argument converter.
"""
async def convert(self, ctx, argument):
attachments = ctx.message.attachments
mentions = MENTION_REGEX.finditer(argument)
matches = IMAGE_LINKS.finditer(argument)
emojis = EMOJI_REGEX.finditer(argument)
ids = ID_REGEX.finditer(argument)
urls = []
if matches:
for match in matches:
# print(match.group(1))
urls.append(match.group(1))
if emojis:
for emoji in emojis:
ext = "gif" if emoji.group(2) else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(
id=emoji.group(3), ext=ext
)
urls.append(url)
if mentions:
for mention in mentions:
                user = ctx.guild.get_member(int(mention.group(1)))
                if user is None:
                    continue
                if user.is_avatar_animated():
                    url = IMAGE_LINKS.search(str(user.avatar_url_as(format="gif")))
                else:
                    url = IMAGE_LINKS.search(str(user.avatar_url_as(format="png")))
                urls.append(url.group(1))
if not urls and ids:
for possible_id in ids:
user = ctx.guild.get_member(int(possible_id.group(0)))
if user:
if user.is_avatar_animated():
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="gif")))
else:
url = IMAGE_LINKS.search(str(user.avatar_url_as(format="png")))
urls.append(url.group(1))
if attachments:
for attachment in attachments:
urls.append(attachment.url)
if not urls:
ctx.command.reset_cooldown(ctx)
raise BadArgument(_("No images provided."))
return urls
async def search_for_images(self, ctx):
urls = []
async for message in ctx.channel.history(limit=10):
if message.attachments:
for attachment in message.attachments:
urls.append(attachment.url)
match = IMAGE_LINKS.match(message.content)
if match:
urls.append(match.group(1))
if not urls:
ctx.command.reset_cooldown(ctx)
raise ValueError(_("No Images found in recent history."))
return urls
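# Hedged usage sketch (added; the command wiring is assumed, not taken from
# this file): a cog command could accept images via the converter roughly as
#
#   @commands.command()
#   async def reverseimagesearch(self, ctx, urls: ImageFinder = None):
#       if urls is None:
#           urls = await ImageFinder().search_for_images(ctx)
#       ...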
| 37.5 | 87 | 0.569431 | [
"MIT"
] | Danstr5544/Fixator10-Cogs | reverseimagesearch/converters.py | 3,075 | Python |