# File: src/cogent3/app/data_store.py | repo: u6052029/cogent3 | license: BSD-3-Clause
import glob
import json
import os
import pathlib
import re
import shutil
import weakref
import zipfile
from collections import defaultdict
from fnmatch import fnmatch, translate
from io import TextIOWrapper
from json import JSONDecodeError
from pathlib import Path
from pprint import pprint
from warnings import warn
from scitrack import get_text_hexdigest
from tinydb import Query, TinyDB
from tinydb.middlewares import CachingMiddleware
from tinydb.storages import JSONStorage
from cogent3.util.deserialise import deserialise_not_completed
from cogent3.util.misc import (
atomic_write,
extend_docstring_from,
get_format_suffixes,
open_,
)
from cogent3.util.table import Table
from cogent3.util.union_dict import UnionDict
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Alpha"
# handling archive, member existence
SKIP = "skip"
OVERWRITE = "overwrite"
RAISE = "raise"
IGNORE = "ignore"
def make_record_for_json(identifier, data, completed):
"""returns a dict for storage as json"""
try:
data = data.to_rich_dict()
except AttributeError:
pass
data = json.dumps(data)
return dict(identifier=identifier, data=data, completed=completed)
def load_record_from_json(data):
"""returns identifier, data, completed status from json string"""
if type(data) == str:
data = json.loads(data)
value = data["data"]
if isinstance(value, str):
try:
value = json.loads(value)
except JSONDecodeError:
pass
return data["identifier"], value, data["completed"]
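# A minimal round-trip sketch of the two helpers above (illustrative only, not
# part of the original module; the values are assumed):
#
#     >>> rec = make_record_for_json("seq1.json", {"a": 1}, True)
#     >>> load_record_from_json(rec)
#     ('seq1.json', {'a': 1}, True)
#
# The data payload is serialised to a JSON string inside the record and is
# decoded back to a Python object on load.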
class DataStoreMember(str):
def __new__(klass, name, parent=None, id=None):
result = str.__new__(klass, name)
result.name = os.path.basename(name)
result.parent = parent
result._file = None
result.id = id
return result
def read(self):
"""returns contents"""
return self.parent.read(self)
def open(self):
"""returns file-like object"""
if self._file is None:
self._file = self.parent.open(self.name)
return self._file
def close(self):
"""closes file"""
if self._file is None:
return
self._file.close()
self._file = None
@property
def md5(self):
return self.parent.md5(self, force=True)
class ReadOnlyDataStoreBase:
"""a read only data store"""
store_suffix = None
def __init__(self, source, suffix=None, limit=None, verbose=False, md5=True):
"""
Parameters
----------
source
path to directory / zip file. Forced to end with store_suffix.
suffix
only members whose name matches the suffix are considered included
limit
the maximum number of members to consider
verbose
displays files that don't match search (applies only to the Zipped
variant)
md5 : bool
record md5 hexadecimal checksum of read data when possible
"""
# assuming delimiter is /
# todo this approach to caching persistent arguments for reconstruction
# is fragile. Need an inspect module based approach
d = locals()
self._persistent = UnionDict({k: v for k, v in d.items() if k != "self"})
source = str(source)
suffix = suffix or ""
if suffix != "*": # wild card search for all
suffix = re.sub(r"^[\s.*]+", "", suffix) # tidy the suffix
source = re.sub(r"/+$", "", source) # tidy the source
self.suffix = suffix
if self.store_suffix and not source.endswith(self.store_suffix):
source = ".".join([source, self.store_suffix])
self.source = str(pathlib.Path(source).expanduser())
self.mode = "r"
self._members = []
self.limit = limit
self._verbose = verbose
self._md5 = md5
self._checksums = {}
def __getstate__(self):
data = self._persistent.copy()
return data
def __setstate__(self, data):
new = self.__class__(**data)
self.__dict__.update(new.__dict__)
return self
def __repr__(self):
if len(self) > 3:
sample = str(list(self[:3]))
sample = f"{sample[:-1]}..."
else:
sample = list(self)
num = len(self)
name = self.__class__.__name__
txt = f"{num}x member {name}(source='{self.source}', members={sample})"
return txt
def __str__(self):
return str(list(self))
def head(self, n=5):
"""displays top n members"""
pprint(self[:n])
def tail(self, n=5):
"""displays last n members"""
pprint(self[-n:])
def __iter__(self):
for i, member in enumerate(self.members):
if not isinstance(member, DataStoreMember):
member = DataStoreMember(self.get_absolute_identifier(member), self)
self.members[i] = member
yield member
def __getitem__(self, index):
return self.members[index]
def __len__(self):
return len(self.members)
def __contains__(self, identifier):
"""whether relative identifier has been stored"""
if isinstance(identifier, DataStoreMember):
return identifier.parent is self
if not identifier.endswith(self.suffix):
suffix = pathlib.Path(identifier).suffix
            # possibly an "added" file
if self.store_suffix == "zip":
klass = ReadOnlyZippedDataStore
else:
klass = ReadOnlyDirectoryDataStore
new = klass(self.source, suffix=suffix)
return identifier in new
identifier = self.get_relative_identifier(identifier)
result = False
for member in self.members:
if identifier in member:
result = True
break
return result
def get_member(self, identifier):
"""returns DataStoreMember"""
identifier = self.get_relative_identifier(identifier)
for member in self.members:
if identifier in member:
return member
return None
def get_relative_identifier(self, identifier):
"""returns the identifier relative to store root path
"""
if isinstance(identifier, DataStoreMember) and identifier.parent is self:
return identifier
source = self.source
identifier = os.path.basename(identifier)
if source.endswith(".zip"):
# we insert the source path into identifier name
# for zip members to ensure inflation creates a directory
# containing them
source = source.replace(".zip", "")
source = os.path.basename(source)
identifier = f"{source}{os.sep}{identifier}"
else:
identifier = Path(identifier)
identifier = identifier.name
return identifier
def get_absolute_identifier(self, identifier, from_relative=False):
"""returns the identifier relative to the root path
"""
if not from_relative:
identifier = self.get_relative_identifier(identifier)
source = self.source.replace(".zip", "")
if isinstance(identifier, DataStoreMember):
identifier = identifier.name
elif not identifier.startswith(source):
identifier = f"{source}{os.sep}{identifier}"
return identifier
def read(self, identifier):
"""reads data corresponding to identifier"""
if isinstance(identifier, DataStoreMember) and identifier.parent is self:
identifier = identifier.name
source = self.open(identifier)
data = source.read()
if self._md5:
self._checksums[identifier] = get_text_hexdigest(data)
source.close()
return data
@property
def members(self):
raise NotImplementedError # override in subclasses
def open(self, identifier):
raise NotImplementedError
def filtered(self, pattern=None, callback=None):
"""returns list of members for which callback returns True"""
assert any([callback, pattern]), "Must provide a pattern or a callback"
if pattern:
result = [m for m in self if fnmatch(m, pattern)]
else:
result = [m for m in self if callback(m)]
return result
def md5(self, identifier, force=True):
"""
Parameters
----------
identifier
name of data store member
force : bool
forces reading of data if not already done
Returns
-------
md5 checksum for the member, if available, None otherwise
"""
md5_setting = self._md5 # for restoring automatic md5 calc setting
absoluteid = self.get_absolute_identifier(identifier)
if force and absoluteid not in self._checksums:
self._md5 = True
_ = self.read(absoluteid)
result = self._checksums.get(absoluteid, None)
self._md5 = md5_setting
return result
class ReadOnlyDirectoryDataStore(ReadOnlyDataStoreBase):
@property
def members(self):
if not self._members:
pattern = "%s/**/*.%s" % (self.source, self.suffix)
paths = glob.iglob(pattern, recursive=True)
members = []
for i, path in enumerate(paths):
if self.limit and i >= self.limit:
break
member = DataStoreMember(self.get_absolute_identifier(path), self)
members.append(member)
self._members = members
return self._members
def open(self, identifier):
identifier = self.get_absolute_identifier(identifier, from_relative=False)
if not os.path.exists(identifier):
raise ValueError(f"path '{identifier}' does not exist")
infile = open_(identifier)
return infile
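# Typical read-only usage, assuming a directory "results/" holding *.json
# members (a sketch, not part of the original module):
#
#     >>> dstore = ReadOnlyDirectoryDataStore("results", suffix="json")
#     >>> members = dstore.filtered(pattern="*seq*")  # fnmatch-style pattern
#     >>> data = members[0].read()  # also records the md5 checksum by default
#
# filtered() accepts either a glob ``pattern`` or a ``callback`` that returns
# True for members to keep.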
class SingleReadDataStore(ReadOnlyDirectoryDataStore):
"""simplified for a single file"""
def __init__(self, source, *args, **kwargs):
"""
Parameters
source
path to one file
args
ignored
kwargs
ignored
"""
path = Path(source)
assert path.exists() and path.is_file()
super(SingleReadDataStore, self).__init__(
str(path.parent), suffix=str(path.suffix)
)
self._members = [DataStoreMember(path, self)]
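# SingleReadDataStore wraps one existing file as a one-member store, e.g.
# (the path is hypothetical):
#
#     >>> data = SingleReadDataStore("results/seq1.json")[0].read()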
class ReadOnlyZippedDataStore(ReadOnlyDataStoreBase):
store_suffix = "zip"
@property
def members(self):
if os.path.exists(self.source) and not self._members:
source_path = self.source.replace(Path(self.source).suffix, "")
pattern = "*.%s" % self.suffix
members = []
with zipfile.ZipFile(self.source) as archive:
names = archive.namelist()
num_matches = 0
for name in names:
name = os.path.basename(name)
if fnmatch(name, pattern):
num_matches += 1
member = DataStoreMember(os.path.join(source_path, name), self)
members.append(member)
elif self._verbose:
print(f"Did not match {name}")
if self.limit and num_matches >= self.limit:
break
self._members = members
return self._members
def open(self, identifier):
identifier = self.get_relative_identifier(identifier)
archive = zipfile.ZipFile(self.source)
record = archive.open(identifier.replace("\\", "/"))
record = TextIOWrapper(record, encoding="latin-1")
return record
class WritableDataStoreBase:
def __init__(self, if_exists=RAISE, create=False):
"""
Parameters
----------
if_exists : str
behaviour when the destination already exists. Valid constants are
defined in this file as OVERWRITE, SKIP, RAISE, IGNORE (they
correspond to lower case version of the same word)
create : bool
if True, the destination is created
"""
d = locals()
d = UnionDict({k: v for k, v in d.items() if k != "self"})
if self._persistent:
self._persistent |= d
else:
self._persistent = d
self._members = []
if_exists = if_exists.lower()
assert if_exists in (OVERWRITE, SKIP, RAISE, IGNORE)
if create is False and if_exists == OVERWRITE:
warn(f"'{OVERWRITE}' reset to '{IGNORE}' and create=True", UserWarning)
create = True
self._source_create_delete(if_exists, create)
def make_relative_identifier(self, data):
"""returns identifier for a new member relative to source"""
if isinstance(data, DataStoreMember):
data = data.name
elif type(data) != str:
try:
data = data.info.source
except AttributeError:
try:
data = data.source
except AttributeError:
raise ValueError(
"objects for storage require either a "
"source or info.source string attribute"
)
basename = os.path.basename(data)
suffix, comp = get_format_suffixes(basename)
if suffix and comp:
pattern = f".{suffix}.{comp}$"
elif suffix:
pattern = f".{suffix}$"
elif comp:
pattern = f".{comp}*$"
else:
pattern = None
if pattern:
basename = re.sub(pattern, "", basename)
basename = f"{basename}.{self.suffix}"
return basename
def make_absolute_identifier(self, data):
"""returns a absolute identifier for a new member, includes source"""
basename = self.make_relative_identifier(data)
identifier = self.get_absolute_identifier(basename, from_relative=True)
return identifier
def add_file(self, path, make_unique=True, keep_suffix=True, cleanup=False):
"""
Parameters
----------
path : str
location of file to be added to the data store
keep_suffix : bool
new path will retain the suffix of the provided file
make_unique : bool
a successive number will be added to the name before the suffix
until the name is unique
cleanup : bool
delete the original
"""
relativeid = self.make_relative_identifier(path)
relativeid = Path(relativeid)
path = Path(path)
if keep_suffix:
relativeid = str(relativeid).replace(
relativeid.suffix, "".join(path.suffixes)
)
relativeid = Path(relativeid)
suffixes = "".join(relativeid.suffixes)
new = relativeid
num = 0
while True:
if not str(relativeid) in self:
if num:
new = str(relativeid).replace(suffixes, f"-{num}{suffixes}")
break
num += 1
relativeid = new
data = SingleReadDataStore(path)[0].read()
self.write(str(relativeid), data)
if cleanup:
path.unlink()
return relativeid
def write_incomplete(self, identifier, not_completed):
"""
Parameters
----------
identifier : str
identifier for record
not_completed : NotComplete
instance that records key details for why incomplete
Returns
-------
None if storage class does not support writing incomplete, otherwise
a DataStoreMember.
"""
if self.suffix != "json":
msg = f"not supported for {self.__class__.__name__}"
warn(msg, UserWarning)
return
record = make_record_for_json(identifier, not_completed, False)
record = json.dumps(record)
self.write(identifier, record)
def write(self, *args, **kwargs):
"""
Parameters
----------
identifier : str
            identifier that data will be saved under
data
data to be saved
Returns
-------
DataStoreMember instance
"""
raise NotImplementedError
def close(self):
pass
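# How make_relative_identifier (defined above) maps a source name onto the
# store suffix, assuming a store whose suffix is "json" and the usual behaviour
# of get_format_suffixes (illustrative values only):
#
#     "data/brca1.fasta.gz"  ->  "brca1.json"
#
# i.e. the basename keeps its stem, format/compression suffixes are stripped,
# and the store's own suffix is appended.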
class WritableDirectoryDataStore(ReadOnlyDirectoryDataStore, WritableDataStoreBase):
def __init__(
self,
source,
suffix,
mode="w",
if_exists=RAISE,
create=False,
md5=True,
**kwargs,
):
"""
Parameters
----------
source
path to directory / zip file
suffix
only members whose name matches the suffix are considered included
mode : str
file opening mode, defaults to write
if_exists : str
behaviour when the destination already exists. Valid constants are
defined in this file as OVERWRITE, SKIP, RAISE, IGNORE (they
correspond to lower case version of the same word)
create : bool
if True, the destination is created
md5 : bool
record md5 hexadecimal checksum of data when possible
"""
assert "w" in mode or "a" in mode
ReadOnlyDirectoryDataStore.__init__(self, source=source, suffix=suffix, md5=md5)
WritableDataStoreBase.__init__(self, if_exists=if_exists, create=create)
d = locals()
self._persistent = {k: v for k, v in d.items() if k != "self"}
self.mode = mode
def _has_other_suffixes(self, path, suffix):
p = Path(path)
allowed = {str(suffix), "log"}
for f in p.iterdir():
if get_format_suffixes(str(f))[0] not in allowed:
return True
return False
def _source_create_delete(self, if_exists, create):
exists = os.path.exists(self.source)
if exists and if_exists == RAISE:
raise RuntimeError(f"'{self.source}' exists")
elif exists and if_exists == OVERWRITE:
if self._has_other_suffixes(self.source, self.suffix):
raise RuntimeError(
f"Unsafe to delete {self.source} as it contains ",
f"files other than .{self.suffix} or .log files."
" You will need to remove this directly yourself.",
)
try:
shutil.rmtree(self.source)
except NotADirectoryError:
os.remove(self.source)
elif not exists and not create:
raise RuntimeError(f"'{self.source}' does not exist")
if create:
os.makedirs(self.source, exist_ok=True)
@extend_docstring_from(WritableDataStoreBase.write)
def write(self, identifier, data):
relative_id = self.get_relative_identifier(identifier)
absolute_id = self.get_absolute_identifier(relative_id, from_relative=True)
if self._md5:
self._checksums[absolute_id] = get_text_hexdigest(data)
with atomic_write(str(absolute_id), in_zip=False) as out:
out.write(data)
member = DataStoreMember(relative_id, self)
if relative_id not in self and relative_id.endswith(self.suffix):
self._members.append(member)
return member
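# A minimal write cycle with the directory-backed store (path and data are
# assumed for illustration):
#
#     >>> out = WritableDirectoryDataStore("outdir", suffix="json", create=True)
#     >>> member = out.write("brca1.json", json_str)  # md5 recorded when md5=True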
class WritableZippedDataStore(ReadOnlyZippedDataStore, WritableDataStoreBase):
def __init__(
self,
source,
suffix,
mode="a",
if_exists=RAISE,
create=False,
md5=True,
**kwargs,
):
"""
Parameters
----------
source
path to directory / zip file
suffix
only members whose name matches the suffix are considered included
mode : str
file opening mode, defaults to append
if_exists : str
behaviour when the destination already exists. Valid constants are
defined in this file as OVERWRITE, SKIP, RAISE, IGNORE (they
correspond to lower case version of the same word)
create : bool
if True, the destination is created
md5 : bool
record md5 hexadecimal checksum of data when possible
"""
ReadOnlyZippedDataStore.__init__(self, source=source, suffix=suffix, md5=md5)
WritableDataStoreBase.__init__(self, if_exists=if_exists, create=create)
d = locals()
self._persistent = {k: v for k, v in d.items() if k != "self"}
self.mode = "a" or mode
def _has_other_suffixes(self, path, suffix):
allowed = {str(suffix), "log"}
for f in zipfile.ZipFile(path).namelist():
if get_format_suffixes(str(f))[0] not in allowed:
return True
return False
def _source_create_delete(self, if_exists, create):
exists = os.path.exists(self.source)
dirname = os.path.dirname(self.source)
if exists and if_exists == RAISE:
raise RuntimeError(f"'{self.source}' exists")
elif exists and if_exists == OVERWRITE:
if self._has_other_suffixes(self.source, self.suffix):
raise RuntimeError(
f"Unsafe to delete {self.source} as it contains ",
f"files other than .{self.suffix} or .log files."
" You will need to remove this directly yourself.",
)
os.remove(self.source)
elif dirname and not os.path.exists(dirname) and not create:
raise RuntimeError(f"'{dirname}' does not exist")
if create and dirname:
os.makedirs(dirname, exist_ok=True)
@extend_docstring_from(WritableDataStoreBase.write)
def write(self, identifier, data):
relative_id = self.get_relative_identifier(identifier)
absolute_id = self.get_absolute_identifier(relative_id, from_relative=True)
if self._md5:
self._checksums[absolute_id] = get_text_hexdigest(data)
with atomic_write(str(relative_id), in_zip=self.source) as out:
out.write(data)
member = DataStoreMember(relative_id, self)
if relative_id not in self and relative_id.endswith(self.suffix):
self._members.append(member)
return member
def _db_lockid(path):
"""returns value for pid in LOCK record or None"""
if not os.path.exists(path):
return None
db = TinyDB(path)
query = Query().identifier.matches("LOCK")
got = db.get(query)
lockid = None if not got else got["pid"]
db.close()
return lockid
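# Locking convention used by the TinyDB stores below: a writable store inserts
# a record ``{"identifier": "LOCK", "pid": os.getpid()}`` when first opened,
# and _db_lockid() reports that pid (or None), so other processes can detect
# an existing lock before writing.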
class ReadOnlyTinyDbDataStore(ReadOnlyDataStoreBase):
"""A TinyDB based json data store"""
store_suffix = "tinydb"
def __init__(self, *args, **kwargs):
kwargs["suffix"] = "json"
super(ReadOnlyTinyDbDataStore, self).__init__(*args, **kwargs)
self._db = None
self._finish = None
def __contains__(self, identifier):
"""whether identifier has been stored here"""
if isinstance(identifier, DataStoreMember):
return identifier.parent is self
query = Query().identifier.matches(identifier)
return self.db.contains(query)
def __repr__(self):
txt = super().__repr__()
query = Query().completed == False
num = self.db.count(query)
if num > 0:
txt = f"{txt}, {num}x incomplete"
return txt
@property
def db(self):
if self._db is None:
storage = CachingMiddleware(JSONStorage)
storage.WRITE_CACHE_SIZE = 50 # todo support for user specifying
self._db = TinyDB(self.source, storage=storage)
name = self.__class__.__name__
if "readonly" in name.lower():
# remove interface for inserting records making this a read only db
self._db.insert = None
else:
self.lock()
self._finish = weakref.finalize(self, self._close, self._db)
return self._db
def __del__(self):
self.close()
@classmethod
def _close(cls, db):
try:
db.storage.flush()
db.close()
except ValueError:
# file probably already closed
pass
def close(self):
"""closes the data store"""
try:
self.unlock()
self.db.storage.flush()
except ValueError:
# file probably already closed
pass
self._finish()
self._finish.detach()
def lock(self):
"""if writable, and not locked, locks the database to this pid
"""
if not self.locked:
self._db.insert(dict(identifier="LOCK", pid=os.getpid()))
self._db.storage.flush()
@property
def locked(self):
"""returns lock pid or None if unlocked or pid matches self"""
return _db_lockid(self.source) is not None
def unlock(self, force=False):
"""remove a lock if pid matches. If force, ignores pid."""
if "readonly" in self.__class__.__name__:
# not allowed to touch a lock
return
query = Query().identifier.matches("LOCK")
got = self.db.get(query)
if not got:
return
lock_id = got["pid"]
if lock_id == os.getpid() or force:
self.db.remove(query)
self.db.storage.flush()
return lock_id
@property
def incomplete(self):
"""returns database records with completed=False"""
query = Query().completed == False
incomplete = []
for record in self.db.search(query):
member = DataStoreMember(record["identifier"], self, id=record.doc_id)
incomplete.append(member)
return incomplete
@property
def summary_incomplete(self):
"""returns a table summarising incomplete results"""
types = defaultdict(list)
indices = "type", "origin"
for member in self.incomplete:
record = member.read()
record = deserialise_not_completed(record)
key = tuple(getattr(record, k, None) for k in indices)
types[key].append([record.message, record.source])
header = list(indices) + ["message", "num", "source"]
rows = []
for record in types:
messages, sources = list(zip(*types[record]))
messages = list(sorted(set(messages)))
if len(messages) > 3:
messages = messages[:3] + ["..."]
if len(sources) > 3:
sources = sources[:3] + ("...",)
row = list(record) + [
", ".join(messages),
len(types[record]),
", ".join(sources),
]
rows.append(row)
table = Table(header=header, data=rows, title="incomplete records")
return table
@property
def members(self):
if not self._members:
if self.suffix:
pattern = translate("*.%s" % self.suffix)
else:
pattern = translate("*")
members = []
query = Query()
query = (query.identifier.matches(pattern)) & (query.completed == True)
for record in self.db.search(query):
member = DataStoreMember(record["identifier"], self, id=record.doc_id)
members.append(member)
if self.limit and len(members) >= self.limit:
break
self._members = members
return self._members
@extend_docstring_from(ReadOnlyDataStoreBase.get_absolute_identifier, pre=True)
def get_absolute_identifier(self, identifier, from_relative=True):
"""For tinydb, this is the same as the relative identifier"""
return self.get_relative_identifier(identifier)
@extend_docstring_from(ReadOnlyDataStoreBase.get_relative_identifier)
def get_relative_identifier(self, identifier):
if isinstance(identifier, DataStoreMember) and identifier.parent is self:
return identifier
identifier = Path(identifier)
identifier = identifier.name
return identifier
def open(self, identifier):
if getattr(identifier, "parent", None) is not self:
member = self.get_member(identifier)
else:
member = identifier
_, record, _ = load_record_from_json(self.db.get(doc_id=member.id))
return record
def read(self, identifier):
data = self.open(identifier)
if self._md5 and isinstance(data, str):
self._checksums[identifier] = get_text_hexdigest(data)
return data
@extend_docstring_from(ReadOnlyDataStoreBase.md5)
def md5(self, member, force=True):
md5_setting = self._md5 # for restoring automatic md5 calc setting
if not getattr(member, "id", None):
member = self.filtered(member)[0]
if force and member not in self._checksums:
self._md5 = True
_ = member.read()
result = self._checksums.get(member, None)
self._md5 = md5_setting
return result
@property
def logs(self):
"""returns all records with a .log suffix"""
logfiles = []
query = Query().identifier.matches(translate("*.log"))
for record in self.db.search(query):
member = DataStoreMember(record["identifier"], self, id=record.doc_id)
logfiles.append(member)
return logfiles
@property
def summary_logs(self):
"""returns a table summarising log files"""
rows = []
for record in self.logs:
data = record.read().splitlines()
first = data.pop(0).split("\t")
row = [first[0], record.name]
key = None
mapped = {}
for line in data:
line = line.split("\t")[-1].split(" : ", maxsplit=1)
if len(line) == 1:
mapped[key] += line[0]
continue
key = line[0]
mapped[key] = line[1]
data = mapped
row.extend(
[
data["python"],
data["user"],
data["command_string"],
data["composable function"],
]
)
rows.append(row)
table = Table(
header=["time", "name", "python version", "who", "command", "composable"],
data=rows,
title="summary of log files",
)
return table
@property
def describe(self):
"""returns tables describing content types"""
lock_id = _db_lockid(self.source)
if lock_id:
title = (
f"Locked db store. Locked to pid={lock_id}, current pid={os.getpid()}"
)
else:
title = "Unlocked db store."
num_incomplete = len(self.incomplete)
num_complete = len(self.members)
num_logs = len(self.logs)
summary = Table(
header=["record type", "number"],
data=[
["completed", num_complete],
["incomplete", num_incomplete],
["logs", num_logs],
],
title=title,
)
return summary
class WritableTinyDbDataStore(ReadOnlyTinyDbDataStore, WritableDataStoreBase):
def __init__(self, *args, **kwargs):
if_exists = kwargs.pop("if_exists", RAISE)
create = kwargs.pop("create", True)
ReadOnlyTinyDbDataStore.__init__(self, *args, **kwargs)
WritableDataStoreBase.__init__(self, if_exists=if_exists, create=create)
def _source_create_delete(self, if_exists, create):
if _db_lockid(self.source):
return
exists = os.path.exists(self.source)
dirname = os.path.dirname(self.source)
if exists and if_exists == RAISE:
raise RuntimeError(f"'{self.source}' exists")
elif exists and if_exists == OVERWRITE:
try:
os.remove(self.source)
except PermissionError:
# probably user accidentally created a directory
shutil.rmtree(self.source)
elif dirname and not os.path.exists(dirname) and not create:
raise RuntimeError(f"'{dirname}' does not exist")
if create and dirname:
os.makedirs(dirname, exist_ok=True)
@extend_docstring_from(WritableDataStoreBase.write)
def write(self, identifier, data):
matches = self.filtered(identifier)
if matches:
return matches[0]
relative_id = self.get_relative_identifier(identifier)
record = make_record_for_json(relative_id, data, True)
doc_id = self.db.insert(record)
member = DataStoreMember(relative_id, self, id=doc_id)
if relative_id.endswith(self.suffix):
self._members.append(member)
return member
def write_incomplete(self, identifier, not_completed):
"""stores an incomplete result object"""
from .composable import NotCompleted
matches = self.filtered(identifier)
if matches:
return matches[0]
relative_id = self.get_relative_identifier(identifier)
record = make_record_for_json(relative_id, not_completed, False)
doc_id = self.db.insert(record)
member = DataStoreMember(relative_id, self, id=doc_id)
return member
def add_file(self, path, make_unique=True, keep_suffix=True, cleanup=False):
"""
Parameters
----------
path : str
location of file to be added to the data store
keep_suffix : bool
new path will retain the suffix of the provided file
make_unique : bool
a successive number will be added to the name before the suffix
until the name is unique
cleanup : bool
delete the original
"""
relativeid = self.make_relative_identifier(path)
relativeid = Path(relativeid)
path = Path(path)
if keep_suffix:
relativeid = str(relativeid).replace(
relativeid.suffix, "".join(path.suffixes)
)
relativeid = Path(relativeid)
if relativeid.suffix:
name_wo_suffix = ".".join(relativeid.name.split(".")[:-1])
else:
name_wo_suffix = relativeid
suffixes = "".join(relativeid.suffixes)
query = Query().identifier.matches(f"{name_wo_suffix}*")
num = self.db.count(query)
if num:
num += 1
            relativeid = str(relativeid).replace(suffixes, f"-{num}{suffixes}")
data = path.read_text()
m = self.write(str(relativeid), data)
if cleanup:
path.unlink()
return m
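# Sketch of the TinyDB-backed write path (names are assumed for illustration):
#
#     >>> db_store = WritableTinyDbDataStore("outdir/results", if_exists=OVERWRITE)
#     >>> m = db_store.write("brca1.json", data)                  # completed record
#     >>> db_store.write_incomplete("tp53.json", not_completed)   # failed record
#     >>> db_store.close()  # flushes the write cache and removes the LOCK record
#
# Both paths store records built by make_record_for_json(), differing only in
# the completed flag.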
# File: test1/test1/settings.py | repo: 435236402/dome | license: MIT
"""
Django settings for test1 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')n$-!%^u%l@#&*%zszn^p+41*12z$yu^&$__6syeoays_cy1#^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tinymce',
'booktest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'test1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test1',
'USER': 'root',
'PASSWORD': 'mysql',
'HOST': '192.168.231.134',
'PORT': 3306
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advanced',
'width': 600,
'height': 400,
}
# File: imagenet/models/pre_resnet_CFandIN_temp.py | repo: LongJin-lab/Nematode-Connectome-Neural-Network | license: MIT
import torch.nn as nn
import torch
import torch.nn.functional as functional
from torch.nn.parameter import Parameter
import math
from torch.autograd import Variable
import numpy as np
import torch.onnx
import netron
# from init import *
from random import random
import argparse
# __all__ = ['pre_resnet18', 'pre_resnet34', 'pre_resnet50', 'pre_resnet101',
# 'pre_resnet152']
__all__ = ['honet18_in', 'honet34_in', 'honet50_in', 'pre_act_resnet18_in', 'pre_act_resnet34_in', 'pre_act_resnet50_in']
# __all__ = ['HONet34_IN', 'HONet18_IN']
parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
args = parser.parse_args()
global num_cla
num_cla = 1000
class BasicBlockWithDeathRate(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, death_rate=0., downsample=None):
super(BasicBlockWithDeathRate, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.in_planes = in_planes
self.planes = planes
self.death_rate = death_rate
def forward(self, x):
if not self.training or torch.rand(1)[
0] >= self.death_rate: # 2nd condition: death_rate is below the upper bound
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
# ^ the same with Pre-ResNet
if self.training:
out /= (1. - self.death_rate) # out = out/(1. - death_rate) ? maybe it is mutiplied by the rate before
else:
if self.stride == 1:
out = Variable(torch.FloatTensor(x.size()).cuda().zero_(), requires_grad=False)
else:
size = list(x.size())
size[-1] //= 2 # Maybe it is the Height (interger, devide)
size[-2] //= 2 # Maybe it is the Width
size[-3] *= 2 # Maybe Channel
size = torch.Size(size)
out = Variable(torch.FloatTensor(size).cuda().zero_(), requires_grad=False) # all zero tensor
return out
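# Note on the stochastic-depth block above: during training a surviving block's
# output is divided by (1 - death_rate) so its expected value matches the
# always-on behaviour used at inference, while a dropped block contributes a
# zero tensor of the appropriate (possibly strided) shape.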
class BasicBlock_cifar(nn.Module): # actually, this is the preact block
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super(BasicBlock_cifar, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.in_planes = in_planes
self.planes = planes
def forward(self, x): # Pre-ResNet
out = self.bn1(x) # wo BN
# out = x # wo BN
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
return out
class HOBlock(nn.Module): # actually, this is the preact block
expansion = 1
def __init__(self, in_planes, planes, last_res_planes, l_last_res_planes, stride=1, k_ini=-9.0 / 5, fix_k=False,
stepsize=1, given_ks=[10, 10, 10, 10], downsample=None):
super(HOBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
# self.bn3 = nn.BatchNorm2d(planes)# 20210803
self.stride = stride
self.in_planes = in_planes
self.planes = planes
self.last_res_planes = last_res_planes
self.l_last_res_planes = l_last_res_planes
self.stepsize = stepsize
self.fix_k = fix_k
if self.fix_k:
self.k = k_ini
self.a_0 = float(given_ks[0])
self.a_1 = float(given_ks[1])
self.a_2 = float(given_ks[2])
self.b_0 = float(given_ks[3])
else:
self.k = nn.Parameter(torch.Tensor(1).uniform_(k_ini, k_ini))
# self.ks = nn.ParameterList(torch.Tensor(1).uniform_(1.0, 1.1))
# print('l_last_res_planes, last_res_planes, in_planes, planes', l_last_res_planes, last_res_planes, in_planes, planes)
if not (self.last_res_planes == -1 or self.l_last_res_planes == -1):
# if 1:
if self.planes == 32:
if in_planes == 16:
self.downsample_16_32_x = Downsample_clean(16, 32, 2)
# print('downsample_16_32_x')
if self.last_res_planes == 16:
self.downsample_16_32_l = Downsample_clean(16, 32, 2)
# print('downsample_16_32_l')
if self.l_last_res_planes == 16:
self.downsample_16_32_ll = Downsample_clean(16, 32, 2)
# print('downsample_16_32_ll')
if self.planes == 64:
if self.in_planes == 32:
self.downsample_32_64_x = Downsample_clean(32, 64, 2)
if self.last_res_planes == 32:
self.downsample_32_64_l = Downsample_clean(32, 64, 2)
if self.l_last_res_planes == 32:
self.downsample_32_64_ll = Downsample_clean(32, 64, 2)
if self.planes == 128:
if self.in_planes == 64:
self.downsample_64_128_x = Downsample_clean(64, 128, 2)
if self.last_res_planes == 64:
self.downsample_64_128_l = Downsample_clean(64, 128, 2)
if self.l_last_res_planes == 64:
self.downsample_64_128_ll = Downsample_clean(64, 128, 2)
if self.planes == 256:
if self.in_planes == 128:
self.downsample_128_256_x = Downsample_clean(128, 256, 2)
if self.last_res_planes == 128:
self.downsample_128_256_l = Downsample_clean(128, 256, 2)
if self.l_last_res_planes == 128:
self.downsample_128_256_ll = Downsample_clean(128, 256, 2)
def forward(self, x, last_res, l_last_res): # Pre-ResNet
residual = x
F_x_n = self.bn1(x) # wo BN
# F_x_n=x
F_x_n = self.relu(F_x_n)
F_x_n = self.conv1(F_x_n)
F_x_n = self.bn2(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv2(F_x_n)
# if not (isinstance(last_res,int) or isinstance(l_last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res.size()[1],l_last_res.size()[1])
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
# print('HO')
# if 1:
if self.planes == 32:
if self.in_planes == 16:
residual = self.downsample_16_32_x(residual)
# print('residual.size()', residual.size())
if self.last_res_planes == 16:
last_res = self.downsample_16_32_l(last_res)
# print('last_res.size()', last_res.size())
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_32_ll(l_last_res)
# print('l_last_res.size()', l_last_res.size())
if self.planes == 64:
if self.in_planes == 32:
residual = self.downsample_32_64_x(residual)
if self.last_res_planes == 32:
last_res = self.downsample_32_64_l(last_res)
if self.l_last_res_planes == 32:
l_last_res = self.downsample_32_64_ll(l_last_res)
if self.planes == 128:
if self.in_planes == 64:
residual = self.downsample_64_128_x(residual)
if self.last_res_planes == 64:
last_res = self.downsample_64_128_l(last_res)
if self.l_last_res_planes == 64:
l_last_res = self.downsample_64_128_ll(l_last_res)
if self.planes == 256:
if self.in_planes == 128:
residual = self.downsample_128_256_x(residual)
if self.last_res_planes == 128:
last_res = self.downsample_128_256_l(last_res)
if self.l_last_res_planes == 128:
l_last_res = self.downsample_128_256_ll(l_last_res)
if not self.fix_k:
self.b_0 = (3 * self.k - 1) / (self.k * 2)
self.a_0 = (3 * self.k + 3) / (self.k * 4)
self.a_1 = -1 / (self.k)
self.a_2 = (self.k + 1) / (4 * self.k)
# print("trainable")
x = torch.mul(self.stepsize, torch.mul(self.b_0, F_x_n)) + torch.mul(self.a_0, residual) + torch.mul(
self.a_1, last_res) + torch.mul(self.a_2, l_last_res)
# print('x', x[0][0][0][0])
# print("self.a_0, self.a_1, self.a_2, self.b_0", self.a_0, self.a_1, self.a_2, self.b_0)
else:
# print('res')
x = F_x_n
# x = self.bn3(x)
l_last_res = last_res
last_res = residual # x means the residual
# residual = x
return x, last_res, l_last_res, self.k
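# The update implemented in HOBlock.forward corresponds to the multi-step
# recurrence
#
#     x_{n+1} = h * b_0 * F(x_n) + a_0 * x_n + a_1 * x_{n-1} + a_2 * x_{n-2}
#
# with b_0 = (3k - 1) / (2k), a_0 = (3k + 3) / (4k), a_1 = -1 / k and
# a_2 = (k + 1) / (4k), where h is ``stepsize`` and k is the (optionally
# trainable) parameter ``self.k``. For any k, a_0 + a_1 + a_2 = 1, so the skip
# connections form an affine combination of the last three states.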
class GaussianNoise(nn.Module):
def __init__(self, stddev):
super(GaussianNoise, self).__init__()
self.stddev = stddev
def forward(self, x):
if self.training:
return x + torch.autograd.Variable(torch.randn(x.size()).cuda() * self.stddev, requires_grad=False)
return x
class Bottleneck_cifar(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck_cifar, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
self.planes = planes
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
return out
class HoBottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, last_res_planes, l_last_res_planes, stride=1, k_ini=-9.0 / 5, fix_k=False,
stepsize=1, given_ks=[1.0 / 3, 5.0 / 9, 1.0 / 9, 16.0 / 9]):
super(HoBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.expansion = 4
self.in_planes = in_planes
self.planes = planes * self.expansion
self.last_res_planes = last_res_planes
self.l_last_res_planes = l_last_res_planes
self.stepsize = stepsize
self.fix_k = fix_k
if self.fix_k:
self.k = k_ini
self.a_0 = float(given_ks[0])
self.a_1 = float(given_ks[1])
self.a_2 = float(given_ks[2])
self.b_0 = float(given_ks[3])
else:
self.k = nn.Parameter(torch.Tensor(1).uniform_(k_ini, k_ini))
# self.ks=nn.ParameterList(torch.Tensor(1).uniform_(1.0, 1.1))
# self.downsample_16_64_res = Downsample_clean(16, 64, 1)
# if not (last_res_planes == -1 and l_last_res_planes == -1):
# if 1:
if not (last_res_planes == -1 or l_last_res_planes == -1):
if self.planes == 32:
if in_planes == 16:
self.downsample_16_32_x = Downsample_clean(16, 32, 2)
# print('downsample_16_32_x')
if last_res_planes == 16:
self.downsample_16_32_l = Downsample_clean(16, 32, 2)
# print('downsample_16_32_l')
if l_last_res_planes == 16:
self.downsample_16_32_ll = Downsample_clean(16, 32, 2)
# print('downsample_16_32_ll')
if self.planes == 64:
if self.in_planes == 16:
self.downsample_16_64_x = Downsample_clean(16, 64, 1)
# print('downsample_16_32_x')
if self.last_res_planes == 16:
self.downsample_16_64_l = Downsample_clean(16, 64, 1)
# print('downsample_16_32_l')
if self.l_last_res_planes == 16:
self.downsample_16_64_ll = Downsample_clean(16, 64, 1)
if self.in_planes == 32:
self.downsample_32_64_x = Downsample_clean(32, 64, 2)
if self.last_res_planes == 32:
self.downsample_32_64_l = Downsample_clean(32, 64, 2)
if self.l_last_res_planes == 32:
self.downsample_32_64_ll = Downsample_clean(32, 64, 2)
if self.planes == 128:
if self.in_planes == 64:
self.downsample_64_128_x = Downsample_clean(64, 128, 2)
if self.last_res_planes == 64:
self.downsample_64_128_l = Downsample_clean(64, 128, 2)
if self.l_last_res_planes == 64:
self.downsample_64_128_ll = Downsample_clean(64, 128, 2)
if self.planes == 256:
if self.in_planes == 128:
self.downsample_128_256_x = Downsample_clean(128, 256, 2)
if self.last_res_planes == 128:
self.downsample_128_256_l = Downsample_clean(128, 256, 2)
if self.l_last_res_planes == 128:
self.downsample_128_256_ll = Downsample_clean(128, 256, 2)
def forward(self, x, last_res, l_last_res):
# if self.expansion==4:
# residual = self.downsample_16_64_res(x)
# elif self.expansion==1:
# residual = x
residual = x
F_x_n = self.bn1(x)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv1(F_x_n)
F_x_n = self.bn2(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv2(F_x_n)
F_x_n = self.bn3(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv3(F_x_n)
# self.planes = self.planes*self.expansion
# if not (isinstance(last_res,int) or isinstance(l_last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res.size()[1],l_last_res.size()[1])
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
# elif not (isinstance(last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[
# 1], residual.size()[1], last_res.size()[1], l_last_res)
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
# else:
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res,l_last_res)
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
# print('HO')
# if 1:
if self.planes == 32:
if self.in_planes == 16:
residual = self.downsample_16_32_x(residual)
# print('residual.size()', residual.size())
if self.last_res_planes == 16:
last_res = self.downsample_16_32_l(last_res)
# print('last_res.size()', last_res.size())
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_32_ll(l_last_res)
# print('l_last_res.size()', l_last_res.size())
if self.planes == 64:
if self.in_planes == 16:
residual = self.downsample_16_64_x(residual)
if self.last_res_planes == 16:
last_res = self.downsample_16_64_l(last_res)
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_64_ll(l_last_res)
if self.in_planes == 32:
residual = self.downsample_32_64_x(residual)
if self.last_res_planes == 32:
last_res = self.downsample_32_64_l(last_res)
if self.l_last_res_planes == 32:
l_last_res = self.downsample_32_64_ll(l_last_res)
if self.planes == 128:
if self.in_planes == 64:
residual = self.downsample_64_128_x(residual)
if self.last_res_planes == 64:
last_res = self.downsample_64_128_l(last_res)
if self.l_last_res_planes == 64:
l_last_res = self.downsample_64_128_ll(l_last_res)
if self.planes == 256:
if self.in_planes == 128:
residual = self.downsample_128_256_x(residual)
if self.last_res_planes == 128:
last_res = self.downsample_128_256_l(last_res)
if self.l_last_res_planes == 128:
l_last_res = self.downsample_128_256_ll(l_last_res)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
if not self.fix_k:
self.b_0 = (3 * self.k - 1) / (self.k * 2)
self.a_0 = (3 * self.k + 3) / (self.k * 4)
self.a_1 = -1 / (self.k)
self.a_2 = (self.k + 1) / (4 * self.k)
# x = torch.mul(b_0, F_x_n) + torch.mul(a_0, residual) + torch.mul(a_1, last_res) + torch.mul(a_2, l_last_res)
x = torch.mul(self.stepsize, torch.mul(self.b_0, F_x_n)) + torch.mul(self.a_0, residual) + torch.mul(
self.a_1, last_res) + torch.mul(self.a_2, l_last_res)
else:
# print('res')
x = F_x_n
l_last_res = last_res
last_res = residual # x means the residual
# residual = x
# print('x.sixe()[1], residual.size()[1]', x.size()[1], residual.size()[1])
return x, last_res, l_last_res, self.k
class Downsample(nn.Module): # ReLU and BN are involved in this downsample
def __init__(self, in_planes, out_planes, stride=2):
super(Downsample, self).__init__()
self.downsample = nn.Sequential(
nn.BatchNorm2d(in_planes),
nn.ReLU(inplace=True),
nn.Conv2d(in_planes, out_planes,
kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
x = self.downsample(x)
return x
class Downsample_clean(nn.Module): # ReLU and BN are involved in this downsample
def __init__(self, in_planes, out_planes, stride=2):
super(Downsample_clean, self).__init__()
self.downsample_ = nn.Sequential(
# nn.BatchNorm2d(in_planes),
# nn.ReLU(inplace=True),
nn.Conv2d(in_planes, out_planes,
kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
x = self.downsample_(x)
return x
class Downsample_real(nn.Module): # ReLU and BN are not involved in this downsample
def __init__(self, in_shape, out_shape):
super(Downsample_real, self).__init__()
# in_shape = x.shape()
self.in_planes = in_shape[1]
self.out_planes = out_shape[1]
self.stride = int(in_shape[2] / out_shape[2])
# [256, 64, 32, 32]->[256, 128, 16, 16]
self.downsample_real = nn.Sequential(
# nn.BatchNorm2d(in_planes),
# nn.ReLU(inplace=True),
nn.Conv2d(self.in_planes, self.out_planes,
kernel_size=1, stride=self.stride, bias=False)
)
def forward(self, x):
x = self.downsample_real(x)
return x
class MResNet(nn.Module):
# def __init__(self,block,layers,pretrain=True,num_classes=num_cla,stochastic_depth=False,PL=0.5,noise_level=0.001,noise=False):
def __init__(self, block, layers, pretrain=False, num_classes=num_cla, stochastic_depth=False, PL=1.0,
noise_level=0.001, noise=False):
self.in_planes = 16
self.planes = [16, 32, 64]
self.strides = [1, 2, 2]
super(MResNet, self).__init__()
self.noise = noise # what for?
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.ks = nn.ParameterList([nn.Parameter(torch.Tensor(1).uniform_(1.0, 1.1)) for i in
range(layers[0] + layers[1] + layers[2])]) # each layer has a trainable $k_n$
self.stochastic_depth = stochastic_depth
blocks = []
n = layers[0] + layers[1] + layers[2]
if not self.stochastic_depth:
for i in range(3):
blocks.append(block(self.in_planes, self.planes[i], self.strides[i]))
self.in_planes = self.planes[i] * block.expansion
for j in range(1, layers[
i]): # Recalling "MResNet(BasicBlock,[3,3,3],**kwargs)", and "layers" is assigned as "[3,3,3]"; then j is 0 to 2
blocks.append(block(self.in_planes, self.planes[i])) # three (Basic) Blocks
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)] # n is the sum of elements of "[3,3,3]"
# print(death_rates)
for i in range(3):
blocks.append(block(self.in_planes, self.planes[i], self.strides[i],
death_rate=death_rates[i * layers[0]])) # note that layers[k] == layers[j]
self.in_planes = self.planes[i] * block.expansion
for j in range(1, layers[i]):
blocks.append(block(self.in_planes, self.planes[i], death_rate=death_rates[i * layers[0] + j]))
self.blocks = nn.ModuleList(blocks) # ModuleList cannot determine the sequence of layers
self.downsample1 = Downsample(16, 64, stride=1) # Downsample: (in_planes,out_planes,stride=2):
# self.downsample1=nn.Conv2d(16, 64,
# kernel_size=1, stride=1, bias=False)
self.downsample21 = Downsample(16 * block.expansion,
32 * block.expansion) # "expansion" is 1 for BasicBlocks and is 4 for the Bottleneck
# self.downsample22=Downsample(16*block.expansion,32*block.expansion)
self.downsample31 = Downsample(32 * block.expansion, 64 * block.expansion)
# self.downsample32=Downsample(32*block.expansion,64*block.expansion)
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # element num of the kernel
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def change_state(self):
self.pretrain = not self.pretrain
def forward(self, x):
x = self.conv1(x)
# x=self.bn1(x)
# x=self.relu(x)
if self.block.expansion == 4: # 4 is the "expansion" of the "Bottleneck". If "Bottleneck" is used, we need to downsample
residual = self.downsample1(x) # residual.size()[1]: 16->64
else:
residual = x
x = self.blocks[0](x) + residual # x.size()[1]: 16->64
last_res = residual
for i, b in enumerate(self.blocks): # index and content
if i == 0:
continue
residual = x
if b.in_planes != b.planes * b.expansion: # sizes of the input and output are not the same
if b.planes == 32:
residual = self.downsample21(x)
# if not self.pretrain:
# last_res=self.downsample22(last_res)
elif b.planes == 64:
residual = self.downsample31(x)
# if not self.pretrain:
# last_res=self.downsample32(last_res)
x = b(x)
# print(x.size())
# print(residual.size())
x += residual
elif self.pretrain: #
x = b(x) + residual
else: # in.channel = out.channel and not pretrain
x = b(x) + self.ks[i].expand_as(residual) * residual + (1 - self.ks[i]).expand_as(
last_res) * last_res # "B.expand_as (A)": expand B in A's shape
last_res = residual
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x, self.ks
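# In MResNet.forward, once input and output channels agree, the block output is
# mixed with the two previous residual states through a per-block trainable k:
#
#     x_{n+1} = F(x_n) + k_n * x_n + (1 - k_n) * x_{n-1}
#
# which reduces to the standard pre-activation ResNet update when k_n = 1.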
class HONet_v2(nn.Module):
def __init__(self, block, layers, k_ini=-9.0 / 5, pretrain=False, num_classes=num_cla, stochastic_depth=False,
PL=1.0, noise_level=0.001,
noise=False):
self.in_planes = 16
self.planes = [16, 32, 64]
self.last_res_planes = -1
self.l_last_res_planes = -1
self.strides = [1, 2, 2]
super(HONet_v2, self).__init__()
self.noise = noise # what for?
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.stochastic_depth = stochastic_depth
self.k_ini = k_ini
# self.stepsize =nn.Parameter(torch.Tensor(1).uniform_(1, 1))
blocks = []
self.ks = []
n = layers[0] + layers[1] + layers[2]
l = 0
if not self.stochastic_depth:
for i in range(3): # there are 3 elements in the list like [7,7,7]
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini))
# ###
# if
#
# ###
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
self.in_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
# print('i', i)
for j in range(1, layers[
i]): # Recalling "MResNet(BasicBlock,[3,3,3],**kwargs)", and "layers" is assigned as "[3,3,3]"; then j is 1 to 2
# if l == 0:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
#
# elif l==1:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
# else:
# self.l_last_res_planes = self.planes[i]*block.expansion
# self.last_res_planes = self.planes[i]*block.expansion
# self.plane = self.planes[i]*block.expansion
# print('j', j)
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini)) # three (Basic) Blocks
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)] # n is the sum of elements of "[3,3,3]"
# print(death_rates)
for i in range(3):
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, death_rate=death_rates[i * layers[0]])) # note that layers[k] == layers[j]
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.in_planes = self.planes[i] * block.expansion
# print('i', i)
for j in range(1, layers[i]):
# print('j', j)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, death_rate=death_rates[i * layers[0] + j]))
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.blocks = nn.ModuleList(blocks) # ModuleList cannot determine the sequence of layers
self.downsample1 = Downsample(16, 64, stride=1) # Downsample: (in_planes,out_planes,stride):
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # element num of the kernel
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
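        # (The loop above is the standard He/Kaiming initialization: conv weights are
        # drawn from a zero-mean Gaussian with variance 2/n, where
        # n = k_h * k_w * out_channels, and the BatchNorm layers start as the identity.)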
def change_state(self):
self.pretrain = not self.pretrain
def forward(self, x):
self.ks = []
x = self.conv1(x)
last_res = -1
l_last_res = -1
# x=self.bn1(x)
# x=self.relu(x)
if self.block.expansion == 4: # 4 is the "expansion" of the "Bottleneck". If "Bottleneck" is used, we need to downsample
residual = self.downsample1(x)
# print('downsample1')
else:
residual = x
x, last_res, l_last_res, k = self.blocks[0](x, last_res, l_last_res)
# print('v2: x.sixe()[1], residual.size()[1]', x.size()[1], residual.size()[1])
x += residual
# l_last_res = residual
residual = x
x, last_res, l_last_res, k = self.blocks[1](x, last_res, l_last_res)
# x = self.blocks[1](x)[0] + residual
x += residual
# last_res = residual
# residual = x # moved from below. Flag:318
### \end
for i, b in enumerate(self.blocks): # index and content
if i == 0 or i == 1:
# print('i', i)
continue
residual = x # moved up. Flag:318
####
# if b.in_planes != b.planes * b.expansion: # sizes of the input and output are not the same
# if b.planes == 32:
# residual = self.downsample21(x)
# # if not self.pretrain:
# # last_res=self.downsample22(last_res)
# elif b.planes == 64:
# residual = self.downsample31(x)
#
# x = b(x)
# # print(x.size())
# # print(residual.size())
# x += residual
####
if self.pretrain: #
x = b(x) + residual
else: # in.channel = out.channel and not pretrain
# \begin HONet core
x, last_res, l_last_res, k = b(x, last_res, l_last_res)
self.ks += k.data
# print('i, ks', i, self.ks)
# \end HONet core
# print('cnt', cnt1, cnt2, cnt3, cnt4)
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
# print('out')
return x, self.ks
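# A rough usage sketch (names are placeholders: `block` stands for the basic
# block class defined earlier in this file, and `num_cla` is the global class
# count it already relies on):
#
#   model = HONet_v2(block, [3, 3, 3])
#   logits, ks = model(torch.randn(2, 3, 32, 32))   # CIFAR-sized input
#
# `ks` collects the per-block k coefficients gathered during the forward pass.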
class HONet_stepsize(nn.Module):
def __init__(self, block, layers, k_ini=-9.0 / 5, pretrain=False, num_classes=num_cla, stochastic_depth=False,
PL=1.0, noise_level=0.001,
noise=False, dataset='cifar10'):
self.in_planes = 16
self.planes = [16, 32, 64]
self.last_res_planes = -1
self.l_last_res_planes = -1
self.strides = [1, 2, 2]
super(HONet_stepsize, self).__init__()
self.noise = noise # what for?
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.stochastic_depth = stochastic_depth
self.k_ini = k_ini
self.stepsize = nn.Parameter(torch.Tensor(1).uniform_(1, 1))
blocks = []
self.ks = []
n = layers[0] + layers[1] + layers[2]
l = 0
if not self.stochastic_depth:
for i in range(3): # there are 3 elements in the list like [7,7,7]
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, stepsize=self.stepsize))
# ###
# if
#
# ###
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
self.in_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
# print('i', i)
            # "layers" is e.g. [3, 3, 3] (cf. MResNet(BasicBlock, [3, 3, 3], **kwargs)),
            # so j runs over the remaining blocks of stage i, i.e. 1 .. layers[i] - 1
            for j in range(1, layers[i]):
# if l == 0:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
#
# elif l==1:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
# else:
# self.l_last_res_planes = self.planes[i]*block.expansion
# self.last_res_planes = self.planes[i]*block.expansion
# self.plane = self.planes[i]*block.expansion
# print('j', j)
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, stepsize=self.stepsize)) # three (Basic) Blocks
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)] # n is the sum of elements of "[3,3,3]"
# print(death_rates)
for i in range(3):
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, stepsize=self.stepsize,
death_rate=death_rates[i * layers[0]])) # note that layers[k] == layers[j]
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.in_planes = self.planes[i] * block.expansion
# print('i', i)
for j in range(1, layers[i]):
# print('j', j)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, stepsize=self.stepsize,
death_rate=death_rates[i * layers[0] + j]))
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
        self.blocks = nn.ModuleList(blocks)  # ModuleList only registers the blocks; the execution order is applied explicitly in forward()
self.downsample1 = Downsample(16, 64, stride=1) # Downsample: (in_planes,out_planes,stride):
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # element num of the kernel
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def change_state(self):
self.pretrain = not self.pretrain
    def forward(self, x):
        # Body not shown here; it presumably mirrors HONet_v2.forward above, with
        # the learnable stepsize already wired into the blocks built in __init__.
        raise NotImplementedError
# =============================================================================
# File: old_projects/hilbert/section2.py  (repo: rorik/manim, license: MIT)
# =============================================================================
from big_ol_pile_of_manim_imports import *
import displayer as disp
from hilbert.curves import \
TransformOverIncreasingOrders, FlowSnake, HilbertCurve, \
SnakeCurve, PeanoCurve
from hilbert.section1 import get_mathy_and_bubble
from scipy.spatial.distance import cdist
def get_time_line():
length = 2.6*FRAME_WIDTH
year_range = 400
time_line = NumberLine(
numerical_radius = year_range/2,
unit_length_to_spatial_width = length/year_range,
tick_frequency = 10,
leftmost_tick = 1720,
number_at_center = 1870,
numbers_with_elongated_ticks = list(range(1700, 2100, 100))
)
time_line.sort_points(lambda p : p[0])
time_line.set_color_by_gradient(
PeanoCurve.CONFIG["start_color"],
PeanoCurve.CONFIG["end_color"]
)
time_line.add_numbers(
2020, *list(range(1800, 2050, 50))
)
return time_line
class SectionTwo(Scene):
def construct(self):
self.add(TextMobject("Section 2: Filling space"))
self.wait()
class HilbertCurveIsPerfect(Scene):
def construct(self):
curve = HilbertCurve(order = 6)
curve.set_color(WHITE)
colored_curve = curve.copy()
colored_curve.thin_out(3)
lion = ImageMobject("lion", invert = False)
lion.replace(curve, stretch = True)
sparce_lion = lion.copy()
sparce_lion.thin_out(100)
distance_matrix = cdist(colored_curve.points, sparce_lion.points)
closest_point_indices = np.apply_along_axis(
np.argmin, 1, distance_matrix
)
colored_curve.rgbas = sparce_lion.rgbas[closest_point_indices]
line = Line(5*LEFT, 5*RIGHT)
Mobject.align_data(line, colored_curve)
line.rgbas = colored_curve.rgbas
self.add(lion)
self.play(ShowCreation(curve, run_time = 3))
self.play(
FadeOut(lion),
Transform(curve, colored_curve),
run_time = 3
)
self.wait()
self.play(Transform(curve, line, run_time = 5))
self.wait()
class AskMathematicianFriend(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
bubble.sort_points(lambda p : np.dot(p, UP+RIGHT))
self.add(mathy)
self.wait()
self.play(ApplyMethod(
mathy.blink,
rate_func = squish_rate_func(there_and_back)
))
self.wait()
self.play(ShowCreation(bubble))
self.wait()
self.play(
ApplyMethod(mathy.shift, 3*(DOWN+LEFT)),
ApplyPointwiseFunction(
lambda p : 15*p/get_norm(p),
bubble
),
run_time = 3
)
class TimeLineAboutSpaceFilling(Scene):
def construct(self):
curve = PeanoCurve(order = 5)
curve.stretch_to_fit_width(FRAME_WIDTH)
curve.stretch_to_fit_height(FRAME_HEIGHT)
curve_start = curve.copy()
curve_start.apply_over_attr_arrays(
lambda arr : arr[:200]
)
time_line = get_time_line()
time_line.shift(-time_line.number_to_point(2000))
self.add(time_line)
self.play(ApplyMethod(
time_line.shift,
-time_line.number_to_point(1900),
run_time = 3
))
brace = Brace(
Mobject(
Point(time_line.number_to_point(1865)),
Point(time_line.number_to_point(1888)),
),
UP
)
words = TextMobject("""
Cantor drives himself (and the \\\\
mathematical community at large) \\\\
crazy with research on infinity.
""")
words.next_to(brace, UP)
self.play(
GrowFromCenter(brace),
ShimmerIn(words)
)
self.wait()
self.play(
Transform(time_line, curve_start),
FadeOut(brace),
FadeOut(words)
)
self.play(ShowCreation(
curve,
run_time = 5,
rate_func = None
))
self.wait()
class NotPixelatedSpace(Scene):
def construct(self):
grid = Grid(64, 64)
space_region = Region()
space_mobject = MobjectFromRegion(space_region, DARK_GREY)
curve = PeanoCurve(order = 5).replace(space_mobject)
line = Line(5*LEFT, 5*RIGHT)
line.set_color_by_gradient(curve.start_color, curve.end_color)
for mob in grid, space_mobject:
mob.sort_points(get_norm)
infinitely = TextMobject("Infinitely")
detailed = TextMobject("detailed")
extending = TextMobject("extending")
detailed.next_to(infinitely, RIGHT)
extending.next_to(infinitely, RIGHT)
Mobject(extending, infinitely, detailed).center()
arrows = Mobject(*[
Arrow(2*p, 4*p)
for theta in np.arange(np.pi/6, 2*np.pi, np.pi/3)
for p in [rotate_vector(RIGHT, theta)]
])
self.add(grid)
self.wait()
self.play(Transform(grid, space_mobject, run_time = 5))
self.remove(grid)
self.set_color_region(space_region, DARK_GREY)
self.wait()
self.add(infinitely, detailed)
self.wait()
self.play(DelayByOrder(Transform(detailed, extending)))
self.play(ShowCreation(arrows))
self.wait()
self.clear()
self.set_color_region(space_region, DARK_GREY)
self.play(ShowCreation(line))
self.play(Transform(line, curve, run_time = 5))
class HistoryOfDiscover(Scene):
def construct(self):
time_line = get_time_line()
time_line.shift(-time_line.number_to_point(1900))
hilbert_curve = HilbertCurve(order = 3)
peano_curve = PeanoCurve(order = 2)
for curve in hilbert_curve, peano_curve:
curve.scale(0.5)
hilbert_curve.to_corner(DOWN+RIGHT)
peano_curve.to_corner(UP+LEFT)
squares = Mobject(*[
Square(side_length=3, color=WHITE).replace(curve)
for curve in (hilbert_curve, peano_curve)
])
self.add(time_line)
self.wait()
for year, curve, vect, text in [
(1890, peano_curve, UP, "Peano Curve"),
(1891, hilbert_curve, DOWN, "Hilbert Curve"),
]:
point = time_line.number_to_point(year)
point[1] = 0.2
arrow = Arrow(point+2*vect, point, buff = 0.1)
arrow.set_color_by_gradient(curve.start_color, curve.end_color)
year_mob = TexMobject(str(year))
year_mob.next_to(arrow, vect)
words = TextMobject(text)
words.next_to(year_mob, vect)
self.play(
ShowCreation(arrow),
ShimmerIn(year_mob),
ShimmerIn(words)
)
self.play(ShowCreation(curve))
self.wait()
self.play(ShowCreation(squares))
self.wait()
self.play(ApplyMethod(
Mobject(*self.mobjects).shift, 20*(DOWN+RIGHT)
))
class DefinitionOfCurve(Scene):
def construct(self):
start_words = TextMobject([
"``", "Space Filling", "Curve ''",
]).to_edge(TOP, buff = 0.25)
quote, space_filling, curve_quote = start_words.copy().split()
curve_quote.shift(
space_filling.get_left()-\
curve_quote.get_left()
)
space_filling = Point(space_filling.get_center())
end_words = Mobject(*[
quote, space_filling, curve_quote
]).center().to_edge(TOP, buff = 0.25)
space_filling_fractal = TextMobject("""
``Space Filling Fractal''
""").to_edge(UP)
curve = HilbertCurve(order = 2).shift(DOWN)
fine_curve = HilbertCurve(order = 8)
fine_curve.replace(curve)
dots = Mobject(*[
Dot(
curve.points[int(n*curve.get_num_points()/15)],
color = YELLOW_C
)
for n in range(1, 15)
if n not in [4, 11]
])
start_words.shift(2*(UP+LEFT))
self.play(
ApplyMethod(start_words.shift, 2*(DOWN+RIGHT))
)
self.wait()
self.play(Transform(start_words, end_words))
self.wait()
self.play(ShowCreation(curve))
self.wait()
self.play(ShowCreation(
dots,
run_time = 3,
))
self.wait()
self.clear()
self.play(ShowCreation(fine_curve, run_time = 5))
self.wait()
self.play(ShimmerIn(space_filling_fractal))
self.wait()
class PseudoHilbertCurvesDontFillSpace(Scene):
def construct(self):
curve = HilbertCurve(order = 1)
grid = Grid(2, 2, stroke_width=1)
self.add(grid, curve)
for order in range(2, 6):
self.wait()
new_grid = Grid(2**order, 2**order, stroke_width=1)
self.play(
ShowCreation(new_grid),
Animation(curve)
)
self.remove(grid)
grid = new_grid
self.play(Transform(
curve, HilbertCurve(order = order)
))
square = Square(side_length = 6, color = WHITE)
square.corner = Mobject1D()
square.corner.add_line(3*DOWN, ORIGIN)
square.corner.add_line(ORIGIN, 3*RIGHT)
square.digest_mobject_attrs()
square.scale(2**(-5))
square.corner.set_color(
Color(rgb = curve.rgbas[int(curve.get_num_points()/3)])
)
square.shift(
grid.get_corner(UP+LEFT)-\
square.get_corner(UP+LEFT)
)
self.wait()
self.play(
FadeOut(grid),
FadeOut(curve),
FadeIn(square)
)
self.play(
ApplyMethod(square.replace, grid)
)
self.wait()
class HilbertCurveIsLimit(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
bubble.write(
"A Hilbert curve is the \\\\ limit of all these \\dots"
)
self.add(mathy, bubble)
self.play(ShimmerIn(bubble.content))
self.wait()
class DefiningCurves(Scene):
def construct(self):
words = TextMobject(
["One does not simply define the limit \\\\ \
of a sequence of","curves","\\dots"]
)
top_words = TextMobject([
"curves", "are functions"
]).to_edge(UP)
curves1 = words.split()[1]
curves2 = top_words.split()[0]
words.ingest_submobjects()
number = TexMobject("0.27")
pair = TexMobject("(0.53, 0.02)")
pair.next_to(number, buff = 2)
arrow = Arrow(number, pair)
Mobject(number, arrow, pair).center().shift(UP)
number_line = UnitInterval()
number_line.stretch_to_fit_width(5)
number_line.to_edge(LEFT).shift(DOWN)
grid = Grid(4, 4).scale(0.4)
grid.next_to(number_line, buff = 2)
low_arrow = Arrow(number_line, grid)
self.play(ShimmerIn(words))
self.wait()
self.play(
FadeOut(words),
ApplyMethod(curves1.replace, curves2),
ShimmerIn(top_words.split()[1])
)
self.wait()
self.play(FadeIn(number))
self.play(ShowCreation(arrow))
self.play(FadeIn(pair))
self.wait()
self.play(ShowCreation(number_line))
self.play(ShowCreation(low_arrow))
self.play(ShowCreation(grid))
self.wait()
class PseudoHilbertCurveAsFunctionExample(Scene):
args_list = [(2,), (3,)]
# For subclasses to turn args in the above
# list into stings which can be appended to the name
@staticmethod
def args_to_string(order):
return "Order%d"%order
@staticmethod
def string_to_args(order_str):
return int(order_str)
def construct(self, order):
if order == 2:
result_tex = "(0.125, 0.75)"
elif order == 3:
result_tex = "(0.0758, 0.6875)"
phc, arg, result = TexMobject([
"\\text{PHC}_%d"%order,
"(0.3)",
"= %s"%result_tex
]).to_edge(UP).split()
function = TextMobject("Function", size = "\\normal")
function.shift(phc.get_center()+DOWN+2*LEFT)
function_arrow = Arrow(function, phc)
line = Line(5*LEFT, 5*RIGHT)
curve = HilbertCurve(order = order)
line.match_colors(curve)
grid = Grid(2**order, 2**order)
grid.fade()
for mob in curve, grid:
mob.scale(0.7)
index = int(0.3*line.get_num_points())
dot1 = Dot(line.points[index])
arrow1 = Arrow(arg, dot1, buff = 0.1)
dot2 = Dot(curve.points[index])
arrow2 = Arrow(result.get_bottom(), dot2, buff = 0.1)
self.add(phc)
self.play(
ShimmerIn(function),
ShowCreation(function_arrow)
)
self.wait()
self.remove(function_arrow, function)
self.play(ShowCreation(line))
self.wait()
self.play(
ShimmerIn(arg),
ShowCreation(arrow1),
ShowCreation(dot1)
)
self.wait()
self.remove(arrow1)
self.play(
FadeIn(grid),
Transform(line, curve),
Transform(dot1, dot2),
run_time = 2
)
self.wait()
self.play(
ShimmerIn(result),
ShowCreation(arrow2)
)
self.wait()
class ContinuityRequired(Scene):
def construct(self):
words = TextMobject([
"A function must be",
"\\emph{continuous}",
"if it is to represent a curve."
])
words.split()[1].set_color(YELLOW_C)
self.add(words)
self.wait()
class FormalDefinitionOfContinuity(Scene):
def construct(self):
self.setup()
self.label_spaces()
self.move_dot()
self.label_jump()
self.draw_circles()
self.vary_circle_sizes()
self.discontinuous_point()
def setup(self):
self.input_color = YELLOW_C
self.output_color = RED
def spiril(t):
theta = 2*np.pi*t
return t*np.cos(theta)*RIGHT+t*np.sin(theta)*UP
self.spiril1 = ParametricFunction(
lambda t : 1.5*RIGHT + DOWN + 2*spiril(t),
density = 5*DEFAULT_POINT_DENSITY_1D,
)
self.spiril2 = ParametricFunction(
lambda t : 5.5*RIGHT + UP - 2*spiril(1-t),
density = 5*DEFAULT_POINT_DENSITY_1D,
)
Mobject.align_data(self.spiril1, self.spiril2)
self.output = Mobject(self.spiril1, self.spiril2)
self.output.ingest_submobjects()
self.output.set_color(GREEN_A)
self.interval = UnitInterval()
self.interval.set_width(FRAME_X_RADIUS-1)
self.interval.to_edge(LEFT)
self.input_dot = Dot(color = self.input_color)
self.output_dot = self.input_dot.copy().set_color(self.output_color)
left, right = self.interval.get_left(), self.interval.get_right()
self.input_homotopy = lambda x_y_z_t : (x_y_z_t[0], x_y_z_t[1], x_y_z_t[3]) + interpolate(left, right, x_y_z_t[3])
output_size = self.output.get_num_points()-1
output_points = self.output.points
self.output_homotopy = lambda x_y_z_t1 : (x_y_z_t1[0], x_y_z_t1[1], x_y_z_t1[2]) + output_points[int(x_y_z_t1[3]*output_size)]
def get_circles_and_points(self, min_input, max_input):
input_left, input_right = [
self.interval.number_to_point(num)
for num in (min_input, max_input)
]
input_circle = Circle(
radius = get_norm(input_left-input_right)/2,
color = WHITE
)
input_circle.shift((input_left+input_right)/2)
input_points = Line(
input_left, input_right,
color = self.input_color
)
output_points = Mobject(color = self.output_color)
n = self.output.get_num_points()
output_points.add_points(
self.output.points[int(min_input*n):int(max_input*n)]
)
output_center = output_points.points[int(0.5*output_points.get_num_points())]
max_distance = get_norm(output_center-output_points.points[-1])
output_circle = Circle(
radius = max_distance,
color = WHITE
)
output_circle.shift(output_center)
return (
input_circle,
input_points,
output_circle,
output_points
)
def label_spaces(self):
input_space = TextMobject("Input Space")
input_space.to_edge(UP)
input_space.shift(LEFT*FRAME_X_RADIUS/2)
output_space = TextMobject("Output Space")
output_space.to_edge(UP)
output_space.shift(RIGHT*FRAME_X_RADIUS/2)
line = Line(
UP*FRAME_Y_RADIUS, DOWN*FRAME_Y_RADIUS,
color = WHITE
)
self.play(
ShimmerIn(input_space),
ShimmerIn(output_space),
ShowCreation(line),
ShowCreation(self.interval),
)
self.wait()
def move_dot(self):
kwargs = {
"rate_func" : None,
"run_time" : 3
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs),
ShowCreation(self.output, **kwargs)
)
self.wait()
def label_jump(self):
jump_points = Mobject(
Point(self.spiril1.points[-1]),
Point(self.spiril2.points[0])
)
self.brace = Brace(jump_points, RIGHT)
self.jump = TextMobject("Jump")
self.jump.next_to(self.brace, RIGHT)
self.play(
GrowFromCenter(self.brace),
ShimmerIn(self.jump)
)
self.wait()
self.remove(self.brace, self.jump)
def draw_circles(self):
input_value = 0.45
input_radius = 0.04
for dot in self.input_dot, self.output_dot:
dot.center()
kwargs = {
"rate_func" : lambda t : interpolate(1, input_value, smooth(t))
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs)
)
A, B = list(map(Mobject.get_center, [self.input_dot, self.output_dot]))
A_text = TextMobject("A")
A_text.shift(A+2*(LEFT+UP))
A_arrow = Arrow(
A_text, self.input_dot,
color = self.input_color
)
B_text = TextMobject("B")
B_text.shift(B+2*RIGHT+DOWN)
B_arrow = Arrow(
B_text, self.output_dot,
color = self.output_color
)
tup = self.get_circles_and_points(
input_value-input_radius,
input_value+input_radius
)
input_circle, input_points, output_circle, output_points = tup
for text, arrow in [(A_text, A_arrow), (B_text, B_arrow)]:
self.play(
ShimmerIn(text),
ShowCreation(arrow)
)
self.wait()
self.remove(A_text, A_arrow, B_text, B_arrow)
self.play(ShowCreation(input_circle))
self.wait()
self.play(ShowCreation(input_points))
self.wait()
input_points_copy = input_points.copy()
self.play(
Transform(input_points_copy, output_points),
run_time = 2
)
self.wait()
self.play(ShowCreation(output_circle))
self.wait()
self.wait()
self.remove(*[
input_circle, input_points,
output_circle, input_points_copy
])
def vary_circle_sizes(self):
input_value = 0.45
radius = 0.04
vary_circles = VaryCircles(
self, input_value, radius,
run_time = 5,
)
self.play(vary_circles)
self.wait()
text = TextMobject("Function is ``Continuous at A''")
text.shift(2*UP).to_edge(LEFT)
arrow = Arrow(text, self.input_dot)
self.play(
ShimmerIn(text),
ShowCreation(arrow)
)
self.wait()
self.remove(vary_circles.mobject, text, arrow)
def discontinuous_point(self):
point_description = TextMobject(
"Point where the function jumps"
)
point_description.shift(3*RIGHT)
discontinuous_at_A = TextMobject(
"``Discontinuous at A''",
size = "\\Large"
)
discontinuous_at_A.shift(2*UP).to_edge(LEFT)
text = TextMobject("""
            Circle around output \\
points can never \\\\
be smaller than \\\\
the jump
""")
text.scale(0.75)
text.shift(3.5*RIGHT)
input_value = 0.5
input_radius = 0.04
vary_circles = VaryCircles(
self, input_value, input_radius,
run_time = 5,
)
for dot in self.input_dot, self.output_dot:
dot.center()
kwargs = {
"rate_func" : lambda t : interpolate(0.45, input_value, smooth(t))
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs)
)
discontinuous_arrow = Arrow(discontinuous_at_A, self.input_dot)
arrow = Arrow(
point_description, self.output_dot,
buff = 0.05,
color = self.output_color
)
self.play(
ShimmerIn(point_description),
ShowCreation(arrow)
)
self.wait()
self.remove(point_description, arrow)
tup = self.get_circles_and_points(
input_value-input_radius,
input_value+input_radius
)
input_circle, input_points, output_circle, output_points = tup
input_points_copy = input_points.copy()
self.play(ShowCreation(input_circle))
self.play(ShowCreation(input_points))
self.play(
Transform(input_points_copy, output_points),
run_time = 2
)
self.play(ShowCreation(output_circle))
self.wait()
self.play(ShimmerIn(text))
self.remove(input_circle, input_points, output_circle, input_points_copy)
self.play(vary_circles)
self.wait()
self.play(
ShimmerIn(discontinuous_at_A),
ShowCreation(discontinuous_arrow)
)
self.wait(3)
self.remove(vary_circles.mobject, discontinuous_at_A, discontinuous_arrow)
def continuous_point(self):
pass
class VaryCircles(Animation):
def __init__(self, scene, input_value, radius, **kwargs):
digest_locals(self)
Animation.__init__(self, Mobject(), **kwargs)
def update_mobject(self, alpha):
radius = self.radius + 0.9*self.radius*np.sin(1.5*np.pi*alpha)
self.mobject = Mobject(*self.scene.get_circles_and_points(
self.input_value-radius,
self.input_value+radius
)).ingest_submobjects()
class FunctionIsContinuousText(Scene):
def construct(self):
all_points = TextMobject("$f$ is continuous at every input point")
continuous = TextMobject("$f$ is continuous")
all_points.shift(UP)
continuous.shift(DOWN)
arrow = Arrow(all_points, continuous)
self.play(ShimmerIn(all_points))
self.play(ShowCreation(arrow))
self.play(ShimmerIn(continuous))
self.wait()
class DefineActualHilbertCurveText(Scene):
def construct(self):
self.add(TextMobject("""
Finally define a Hilbert Curve\\dots
"""))
self.wait()
class ReliesOnWonderfulProperty(Scene):
def construct(self):
self.add(TextMobject("""
\\dots which relies on a certain property
of Pseudo-Hilbert-curves.
"""))
self.wait()
class WonderfulPropertyOfPseudoHilbertCurves(Scene):
def construct(self):
val = 0.3
text = TextMobject([
"PHC", "$_n", "(", "%3.1f"%val, ")$",
" has a ", "limit point ", "as $n \\to \\infty$"
])
func_parts = text.copy().split()[:5]
Mobject(*func_parts).center().to_edge(UP)
num_str, val_str = func_parts[1], func_parts[3]
curve = UnitInterval()
curve.sort_points(lambda p : p[0])
dot = Dot().shift(curve.number_to_point(val))
arrow = Arrow(val_str, dot, buff = 0.1)
curve.add_numbers(0, 1)
self.play(ShowCreation(curve))
self.play(
ShimmerIn(val_str),
ShowCreation(arrow),
ShowCreation(dot)
)
self.wait()
self.play(
FadeOut(arrow),
*[
FadeIn(func_parts[i])
for i in (0, 1, 2, 4)
]
)
for num in range(2,9):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dot = Dot(new_curve.points[int(val*new_curve.get_num_points())])
new_num_str = TexMobject(str(num)).replace(num_str)
self.play(
Transform(curve, new_curve),
Transform(dot, new_dot),
Transform(num_str, new_num_str)
)
self.wait()
text.to_edge(UP)
text_parts = text.split()
for index in 1, -1:
text_parts[index].set_color()
starters = Mobject(*func_parts + [
Point(mob.get_center(), stroke_width=1)
for mob in text_parts[5:]
])
self.play(Transform(starters, text))
arrow = Arrow(text_parts[-2].get_bottom(), dot, buff = 0.1)
self.play(ShowCreation(arrow))
self.wait()
class FollowManyPoints(Scene):
def construct(self):
text = TextMobject([
"PHC", "_n", "(", "x", ")$",
" has a limit point ", "as $n \\to \\infty$",
"\\\\ for all $x$"
])
parts = text.split()
parts[-1].next_to(Mobject(*parts[:-1]), DOWN)
parts[-1].set_color(BLUE)
parts[3].set_color(BLUE)
parts[1].set_color()
parts[-2].set_color()
text.to_edge(UP)
curve = UnitInterval()
curve.sort_points(lambda p : p[0])
vals = np.arange(0.1, 1, 0.1)
dots = Mobject(*[
Dot(curve.number_to_point(val))
for val in vals
])
curve.add_numbers(0, 1)
starter_dots = dots.copy().ingest_submobjects()
starter_dots.shift(2*UP)
self.add(curve, text)
self.wait()
self.play(DelayByOrder(ApplyMethod(starter_dots.shift, 2*DOWN)))
self.wait()
self.remove(starter_dots)
self.add(dots)
for num in range(1, 10):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dots = Mobject(*[
Dot(new_curve.points[int(val*new_curve.get_num_points())])
for val in vals
])
self.play(
Transform(curve, new_curve),
Transform(dots, new_dots),
)
# self.wait()
class FormalDefinitionOfHilbertCurve(Scene):
def construct(self):
val = 0.7
text = TexMobject([
"\\text{HC}(", "x", ")",
"=\\lim_{n \\to \\infty}\\text{PHC}_n(", "x", ")"
])
text.to_edge(UP)
x1 = text.split()[1]
x2 = text.split()[-2]
x2.set_color(BLUE)
explanation = TextMobject("Actual Hilbert curve function")
exp_arrow = Arrow(explanation, text.split()[0])
curve = UnitInterval()
dot = Dot(curve.number_to_point(val))
x_arrow = Arrow(x1.get_bottom(), dot, buff = 0)
curve.sort_points(lambda p : p[0])
curve.add_numbers(0, 1)
self.add(*text.split()[:3])
self.play(
ShimmerIn(explanation),
ShowCreation(exp_arrow)
)
self.wait()
self.remove(explanation, exp_arrow)
self.play(ShowCreation(curve))
self.play(
ApplyMethod(x1.set_color, BLUE),
ShowCreation(x_arrow),
ShowCreation(dot)
)
self.wait()
self.remove(x_arrow)
limit = Mobject(*text.split()[3:]).ingest_submobjects()
limit.stroke_width = 1
self.play(ShimmerIn(limit))
for num in range(1, 9):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dot = Dot(new_curve.points[int(val*new_curve.get_num_points())])
self.play(
Transform(curve, new_curve),
Transform(dot, new_dot),
)
class CouldNotDefineForSnakeCurve(Scene):
def construct(self):
self.add(TextMobject("""
You could not define a limit curve from
snake curves.
"""))
self.wait()
class ThreeThingsToProve(Scene):
def construct(self):
definition = TexMobject([
"\\text{HC}(", "x", ")",
"=\\lim_{n \\to \\infty}\\text{PHC}_n(", "x", ")"
])
definition.to_edge(UP)
definition.split()[1].set_color(BLUE)
definition.split()[-2].set_color(BLUE)
intro = TextMobject("Three things need to be proven")
prove_that = TextMobject("Prove that HC is $\\dots$")
prove_that.scale(0.7)
prove_that.to_edge(LEFT)
items = TextMobject([
"\\begin{enumerate}",
"\\item Well-defined: ",
"Points on Pseudo-Hilbert-curves really do converge",
"\\item A Curve: ",
"HC is continuous",
"\\item Space-filling: ",
"Each point in the unit square is an output of HC",
"\\end{enumerate}",
]).split()
items[1].set_color(GREEN)
items[3].set_color(YELLOW_C)
items[5].set_color(MAROON)
Mobject(*items).to_edge(RIGHT)
self.add(definition)
self.play(ShimmerIn(intro))
self.wait()
self.play(Transform(intro, prove_that))
for item in items[1:-1]:
self.play(ShimmerIn(item))
self.wait()
class TilingSpace(Scene):
def construct(self):
coords_set = [ORIGIN]
for n in range(int(FRAME_WIDTH)):
for vect in UP, RIGHT:
for k in range(n):
new_coords = coords_set[-1]+((-1)**n)*vect
coords_set.append(new_coords)
square = Square(side_length = 1, color = WHITE)
squares = Mobject(*[
square.copy().shift(coords)
for coords in coords_set
]).ingest_submobjects()
self.play(
DelayByOrder(FadeIn(squares)),
run_time = 3
)
curve = HilbertCurve(order = 6).scale(1./6)
all_curves = Mobject(*[
curve.copy().shift(coords)
for coords in coords_set
]).ingest_submobjects()
all_curves.thin_out(10)
self.play(ShowCreation(
all_curves,
rate_func = None,
run_time = 15
))
class ColorIntervals(Scene):
def construct(self):
number_line = NumberLine(
numerical_radius = 5,
number_at_center = 5,
leftmost_tick = 0,
density = 2*DEFAULT_POINT_DENSITY_1D
)
number_line.shift(2*RIGHT)
number_line.add_numbers()
number_line.scale(2)
brace = Brace(Mobject(
*number_line.submobjects[:2]
))
self.add(number_line)
for n in range(0, 10, 2):
if n == 0:
brace_anim = GrowFromCenter(brace)
else:
brace_anim = ApplyMethod(brace.shift, 2*RIGHT)
self.play(
ApplyMethod(
number_line.set_color,
RED,
lambda p : p[0] > n-6.2 and p[0] < n-4 and p[1] > -0.4
),
brace_anim
)
# =============================================================================
# File: api/lambda_handler/get_random_color.py  (repo: dgimeno777/aws-api-demo, license: MIT)
# =============================================================================
import json
from api.src.colors import get_random_color
def lambda_handler(event, context):
return {
"statusCode": 200,
"body": json.dumps(get_random_color().value)
}
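# Note: this is the response shape API Gateway's Lambda proxy integration
# expects, an integer "statusCode" plus a JSON-encoded string "body".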
4a274232edcedda9393b016fb72088fb5bf0d757 | 1,099 | py | Python | cibyl/outputs/cli/ci/env/printer.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 3 | 2022-02-17T18:07:07.000Z | 2022-03-19T10:22:38.000Z | cibyl/outputs/cli/ci/env/printer.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 58 | 2022-02-14T14:41:22.000Z | 2022-03-31T10:54:28.000Z | cibyl/outputs/cli/ci/env/printer.py | rhos-infra/cibyl | 842a993ddf3552d1b4f2e85025dcf928f76fe7fb | [
"Apache-2.0"
] | 6 | 2022-02-14T19:21:26.000Z | 2022-03-29T09:31:31.000Z | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from abc import ABC, abstractmethod
from cibyl.outputs.cli.printer import Printer
class CIPrinter(Printer, ABC):
"""Interface for printers of a CI model hierarchy.
"""
@abstractmethod
def print_environment(self, env):
"""
:param env: The environment.
:type env: :class:`cibyl.models.ci.base.environment.Environment`
:return: Textual representation of the provided model.
:rtype: str
"""
raise NotImplementedError
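# A minimal sketch of what a concrete printer could look like (hypothetical;
# the attribute access on `env` is an assumption, not part of cibyl's API):
#
#   class PlainTextPrinter(CIPrinter):
#       def print_environment(self, env):
#           return f"Environment: {env.name.value}"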
4a27437f92548ec7ff55d69a520d4dd3412ea13a | 6,440 | py | Python | mlrun/frameworks/_ml_common/plans/roc_curve_plan.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/_ml_common/plans/roc_curve_plan.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | mlrun/frameworks/_ml_common/plans/roc_curve_plan.py | george0st/mlrun | 6467d3a5ceadf6cd35512b84b3ddc3da611cf39a | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List, Union
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from sklearn.metrics import roc_auc_score, roc_curve
from mlrun.artifacts import Artifact, PlotlyArtifact
from ..._common import ModelType
from ..plan import MLPlanStages, MLPlotPlan
from ..utils import DatasetType, to_dataframe
class ROCCurvePlan(MLPlotPlan):
"""
Plan for producing a receiver operating characteristic (ROC) - a plot that shows the connection / trade-off between
clinical sensitivity and specificity for every possible cut-off for a test or a combination of tests.
"""
_ARTIFACT_NAME = "roc-curves"
def __init__(
self,
pos_label: Union[str, int] = None,
sample_weight: np.ndarray = None,
drop_intermediate: bool = True,
average: str = "macro",
max_fpr: float = None,
multi_class: str = "raise",
labels: List[str] = None,
):
"""
Initialize a receiver operating characteristic plan with the given configuration.
To read more about the parameters, head to the SciKit-Learn docs at:
* https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html
* https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
:param pos_label: The label of the positive class. When None, if 'y_true' (y) is in {-1, 1} or {0, 1},
'pos_label' is set to 1, otherwise an error will be raised. Defaulted to None.
:param sample_weight: Sample weights to apply.
:param drop_intermediate: Whether to drop some suboptimal thresholds which would not appear on a plotted ROC
curve. Defaulted to True.
:param average: Determines the type of averaging performed on the data. If None, the scores for each
class are returned. Defaulted to "macro".
:param max_fpr: For multiclass it should be equal to 1 or None. If not None, the standardized partial
AUC [2] over the range [0, max_fpr] is returned. Defaulted to None.
:param multi_class: Only used for multiclass targets. Determines the type of configuration to use. Can be
one of {'raise', 'ovr', 'ovo'}. Defaulted to "raise".
:param labels: Only used for multiclass targets. List of labels that index the classes in 'y_pred'.
If None, the labels found in 'y_true' (y) will be used. Defaulted to None.
"""
# Store the parameters:
self._pos_label = pos_label
self._sample_weight = sample_weight
self._drop_intermediate = drop_intermediate
self._average = average
self._max_fpr = max_fpr
self._multi_class = multi_class
self._labels = labels
# Continue the initialization for the MLPlan:
super(ROCCurvePlan, self).__init__(need_probabilities=True)
def is_ready(self, stage: MLPlanStages, is_probabilities: bool) -> bool:
"""
        Check whether the plan is ready to be produced at the given stage with the given prediction type. The
        ROC curve can only be produced after prediction probabilities are available.
:param stage: The stage to check if the plan is ready.
:param is_probabilities: True if the 'y_pred' that will be sent to 'produce' is a prediction of probabilities
(from 'predict_proba') and False if not.
:return: True if the plan is producible and False otherwise.
"""
return stage == MLPlanStages.POST_PREDICT and is_probabilities
def produce(
self,
y: DatasetType,
y_pred: DatasetType = None,
model: ModelType = None,
x: DatasetType = None,
**kwargs,
) -> Dict[str, Artifact]:
"""
Produce the roc curve according to the ground truth (y) and predictions (y_pred) values. If predictions are not
available, the model and a dataset can be given to produce them.
:param y: The ground truth values.
:param y_pred: The predictions values.
:param model: Model to produce the predictions.
:param x: Input dataset to produce the predictions.
:return: The produced roc curve artifact in an artifacts dictionary.
"""
# Calculate the predictions if needed:
y_pred = self._calculate_predictions(y_pred=y_pred, model=model, x=x)
# Convert to DataFrame:
y = to_dataframe(dataset=y)
y_pred = to_dataframe(dataset=y_pred)
# One hot encode the labels in order to plot them
y_one_hot = pd.get_dummies(y, columns=y.columns.to_list())
# Create an empty figure:
fig = go.Figure()
fig.add_shape(type="line", line={"dash": "dash"}, x0=0, x1=1, y0=0, y1=1)
# Iteratively add new lines every time we compute a new class:
for i in range(y_pred.shape[1]):
class_i_true = y_one_hot.iloc[:, i]
class_i_pred = y_pred.iloc[:, i]
fpr, tpr, _ = roc_curve(
class_i_true,
class_i_pred,
pos_label=self._pos_label,
sample_weight=self._sample_weight,
drop_intermediate=self._drop_intermediate,
)
auc_score = roc_auc_score(
class_i_true,
class_i_pred,
average=self._average,
sample_weight=self._sample_weight,
max_fpr=self._max_fpr,
multi_class=self._multi_class,
labels=self._labels,
)
name = f"{y_one_hot.columns[i]} (AUC={auc_score:.2f})"
fig.add_trace(go.Scatter(x=fpr, y=tpr, name=name, mode="lines"))
# Configure the layout:
fig.update_layout(
xaxis_title="False Positive Rate",
yaxis_title="True Positive Rate",
yaxis={"scaleanchor": "x", "scaleratio": 1},
xaxis={"constrain": "domain"},
width=700,
height=500,
)
# Creating the plot artifact:
self._artifacts[self._ARTIFACT_NAME] = PlotlyArtifact(
key=self._ARTIFACT_NAME,
figure=fig,
)
return self._artifacts
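# Rough usage sketch (assumes a fitted classifier `model` exposing
# `predict_proba` and matching test data; the names are placeholders):
#
#   plan = ROCCurvePlan()
#   if plan.is_ready(MLPlanStages.POST_PREDICT, is_probabilities=True):
#       artifacts = plan.produce(y=y_test, model=model, x=x_test)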
# =============================================================================
# File: utilities/micro_utils/marshmallow/leading_zeros_integer_field.py
# (repo: ndjuric93/MusicOrganizer, license: Apache-2.0)
# =============================================================================
from marshmallow import fields
class LeadingZerosIntegerField(fields.Integer):
def __init__(self, zeros_count, *args, **kwargs):
super(LeadingZerosIntegerField, self).__init__(*args, **kwargs)
self.zeros_count = zeros_count
def _format_num(self, value):
if value is None:
return None
return str(value).zfill(self.zeros_count)
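# Usage sketch (hypothetical schema; any marshmallow Schema works the same way):
#
#   class TrackSchema(Schema):
#       number = LeadingZerosIntegerField(zeros_count=2)
#
# With zeros_count=2, the integer 7 comes out as the string "07", since
# _format_num is applied on both the dump and load paths.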
# =============================================================================
# File: python/BFS/993. Cousins in Binary Tree.py  (repo: Nobodylesszb/LeetCode, license: MIT)
# =============================================================================
# In a binary tree, the root node is at depth 0, and children of each depth k node are at depth k+1.
# Two nodes of a binary tree are cousins if they have the same depth, but have different parents.
# We are given the root of a binary tree with unique values, and the values x and y of two different nodes in the tree.
# Return true if and only if the nodes corresponding to the values x and y are cousins.
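# LeetCode supplies TreeNode at judge time; a minimal stand-in so the snippet
# runs on its own (an assumption, not part of the original submission):
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right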
class Solution:
def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
def dfs(node, parent, depth, mod):
if node:
if node.val == mod:
return depth, parent
return dfs(node.left, node, depth + 1, mod) or dfs(node.right, node, depth + 1, mod)
dx, px, dy, py = dfs(root, None, 0, x) + dfs(root, None, 0, y)
        return dx == dy and px != py
# =============================================================================
# File: tests/full_flow_tests.py  (repo: FelixLorenz/distiller, license: Apache-2.0)
# =============================================================================
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import os
import errno
import re
from collections import namedtuple
import argparse
import time
DS_CIFAR = 'cifar10'
distiller_root = os.path.realpath('..')
examples_root = os.path.join(distiller_root, 'examples')
script_path = os.path.realpath(os.path.join(examples_root, 'classifier_compression',
'compress_classifier.py'))
###########
# Some Basic Logging Mechanisms
###########
class Colors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
WHITE = '\033[37m'
BG_RED = '\033[41m'
BG_GREEN = '\033[42m'
BG_YELLOW = '\033[43m'
BG_BLUE = '\033[44m'
BG_PURPLE = '\033[45m'
BG_CYAN = '\033[30;46m'
BG_WHITE = '\x1b[30;47m'
BG_RESET = '\033[49m'
BOLD = '\033[1m'
UNDERLINE_ON = '\033[4m'
UNDERLINE_OFF = '\033[24m'
END = '\033[0m'
def colorize(string, color):
return color + string + Colors.END
def error(string):
print(colorize('ERROR: ' + string, Colors.RED))
def test_progress(string):
print(colorize(string, Colors.BLUE))
def success(string):
print(colorize(string, Colors.GREEN))
###########
# Checkers
###########
def compare_values(name, expected, actual):
print('Comparing {0}: Expected = {1} ; Actual = {2}'.format(name, expected, actual))
if expected != actual:
error('Mismatch on {0}'.format(name))
return False
else:
return True
def accuracy_checker(log, expected_top1, expected_top5):
tops = re.findall(r"Top1: (?P<top1>\d*\.\d*) *Top5: (?P<top5>\d*\.\d*)", log)
if not tops:
error('No accuracy results in log')
return False
if not compare_values('Top-1', expected_top1, float(tops[-1][0])):
return False
return compare_values('Top-5', expected_top5, float(tops[-1][1]))
def collateral_checker(log, *collateral_list):
"""Test that the test produced the expected collaterals.
A collateral_list is a list of tuples, where tuple elements are:
0: file name
1: expected file size
"""
for collateral in collateral_list:
statinfo = os.stat(collateral[0])
if statinfo.st_size != collateral[1]:
return False
return True
###########
# Test Configurations
###########
TestConfig = namedtuple('TestConfig', ['args', 'dataset', 'checker_fn', 'checker_args'])
test_configs = [
TestConfig('--arch simplenet_cifar --epochs 2', DS_CIFAR, accuracy_checker, [44.610, 92.080]),
TestConfig('-a resnet20_cifar --resume {0} --quantize-eval --evaluate'.
format(os.path.join(examples_root, 'ssl', 'checkpoints', 'checkpoint_trained_dense.pth.tar')),
DS_CIFAR, accuracy_checker, [91.710, 99.610]),
TestConfig('-a preact_resnet20_cifar --epochs 2 --compress {0}'.
format(os.path.join('full_flow_tests', 'preact_resnet20_cifar_pact_test.yaml')),
DS_CIFAR, accuracy_checker, [54.590, 94.810]),
TestConfig('-a resnet20_cifar --resume {0} --sense=filter --sense-range 0 0.10 0.05'.
format(os.path.join(examples_root, 'ssl', 'checkpoints', 'checkpoint_trained_dense.pth.tar')),
DS_CIFAR, collateral_checker, [('sensitivity.csv', 3175), ('sensitivity.png', 96158)])
]
###########
# Tests Execution
###########
def process_failure(msg, test_idx, cmd, log_path, failed_tests, log):
error(msg)
if not log_path:
test_progress('Log file not created. Full output from test:')
print(log)
else:
test_progress('Test log file: {0}'.format(colorize(log_path, Colors.YELLOW)))
failed_tests.append((test_idx, cmd, log_path))
def validate_dataset_path(path, default, name):
if path:
path = os.path.expanduser(path)
if not os.path.isdir(path):
error("Path provided to {0} dataset doesn't exist".format(name))
exit(1)
return path
test_progress('Path to {0} dataset not provided, defaulting to: {1}'.format(name,
colorize(os.path.abspath(default),
Colors.WHITE)))
try:
os.makedirs(default)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return default
def run_tests():
parser = argparse.ArgumentParser()
parser.add_argument('--cifar10-path', dest='cifar10_path', metavar='DIR', help='Path to CIFAR-10 dataset')
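    # e.g. `python full_flow_tests.py --cifar10-path ~/datasets/cifar10`; when the
    # flag is omitted, validate_dataset_path() falls back to ./data.cifar10 below.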
args = parser.parse_args()
cifar10_path = validate_dataset_path(args.cifar10_path, default='data.cifar10', name='CIFAR-10')
datasets = {DS_CIFAR: cifar10_path}
total_configs = len(test_configs)
failed_tests = []
for idx, tc in enumerate(test_configs):
print('')
test_progress('-------------------------------------------------')
test_progress('Running Test {0} / {1}'.format(idx + 1, total_configs))
dataset_dir = datasets[tc.dataset]
# Run with '--det -j 1' to ensure deterministic results
# Run with single GPU (lowest common denominator...)
cmd = 'python3 {script} {tc_args} --det -j 1 --gpus 0 {data}'.format(script=script_path, tc_args=tc.args,
data=dataset_dir)
test_progress('Executing command: ' + colorize(cmd, Colors.YELLOW))
p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
# Poll for completion
waiting_chars = ['-', '\\', '|', '/']
cnt = 0
while p.poll() is None:
print(waiting_chars[cnt] * 5, end='\r', flush=True)
cnt = (cnt + 1) % len(waiting_chars)
time.sleep(0.5)
log = p.stdout.read()
log_path = re.match(r"Log file for this run: (.*)", log)
log_path = log_path.groups()[0] if log_path else ''
if p.returncode != 0:
process_failure('Command returned with exit status {0}'.
format(p.returncode), idx, cmd, log_path, failed_tests, log)
continue
test_progress('Running checker: ' + colorize(tc.checker_fn.__name__, Colors.YELLOW))
if not tc.checker_fn(log, *tc.checker_args):
process_failure('Checker failed', idx, cmd, log_path, failed_tests, log)
continue
success('TEST PASSED')
test_progress('Test log file: {0}'.format(colorize(log_path, Colors.YELLOW)))
print('')
test_progress('-------------------------------------------------')
test_progress('-------------------------------------------------')
test_progress('All tests completed')
test_progress('# Tests run: {0} ; # Tests passed {1} ; # Tests failed: {2}'.
format(total_configs, total_configs - len(failed_tests), len(failed_tests)))
if failed_tests:
print('')
print(colorize('Failed tests summary:', Colors.RED))
for idx, cmd, log_path in failed_tests:
print(colorize(' Test Index:', Colors.YELLOW), idx + 1)
print(colorize(' Command Line:', Colors.YELLOW), cmd)
print(colorize(' Log File Path:', Colors.YELLOW), log_path)
exit(1)
print('')
success('ALL TESTS PASSED')
print('')
exit(0)
if __name__ == '__main__':
run_tests()
# =============================================================================
# File: python/pycylon/test/test_parquet.py  (repo: kaiyingshan/cylon, license: Apache-2.0)
# =============================================================================
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Run test:
>> pytest -q python/pycylon/test/test_parquet.py
"""
import pandas as pd
from pyarrow.parquet import read_table
from pycylon.frame import DataFrame, CylonEnv
def test_read_parquet():
tbl = read_table('data/input/parquet1_0.parquet')
cdf = DataFrame(tbl)
pdf = pd.read_parquet('file://data/input/parquet1_0.parquet')
assert (pdf.values.tolist() == cdf.to_pandas().values.tolist())
def test_parquet_join():
cdf1 = DataFrame(read_table('data/input/parquet1_0.parquet'))
cdf2 = DataFrame(read_table('data/input/parquet2_0.parquet'))
expected = DataFrame(read_table('data/output/join_inner_1_0.parquet'))
out = cdf1.merge(cdf2, how='inner', on=[0], algorithm='sort', suffixes=('lt-', 'rt-'))
assert(expected.equals(out, ordered=False))
assert (len(expected.to_table().subtract(out.to_table())) == 0)
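# Both tests assume the parquet fixtures under data/input and data/output
# already exist (presumably produced by the repository's test-data setup).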
# =============================================================================
# File: poni/rcontrol_paramiko.py  (repo: saaros/poni, license: Apache-2.0)
# =============================================================================
"""
Remote node control using the Paramiko SSH library
Copyright (c) 2010-2012 Mika Eloranta
See LICENSE for details.
"""
import os
import sys
import socket
import time
from . import errors
from . import rcontrol
import select
import termios
import tty
import errno
import warnings
try:
with warnings.catch_warnings():
# paramiko needs to be imported with warnings disabled to get rid of
# a useless (really) crypto warning
warnings.simplefilter("ignore")
import paramiko
except AttributeError:
import paramiko
try:
from select import epoll
except ImportError:
epoll = None
def convert_paramiko_errors(method):
"""Convert remote Paramiko errors to errors.RemoteError"""
def wrapper(self, *args, **kw):
try:
return method(self, *args, **kw)
except IOError as error:
if error.errno == errno.ENOENT:
raise errors.RemoteFileDoesNotExist(str(error))
else:
raise errors.RemoteError("%s: %s" % (error.__class__.__name__,
error))
except (socket.error, paramiko.SSHException, EOFError) as error:
raise errors.RemoteError("%s: %s" % (error.__class__.__name__,
error))
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
def interactive_shell(chan):
"""stolen from paramiko examples"""
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
class ParamikoRemoteControl(rcontrol.SshRemoteControl):
def __init__(self, node):
rcontrol.SshRemoteControl.__init__(self, node)
self._ssh = None
self._sftp = None
self.ping_interval = 10
def get_sftp(self):
if not self._sftp:
self._sftp = self.get_ssh(lambda ssh: ssh.open_sftp())
return self._sftp
@convert_paramiko_errors
def read_file(self, file_path):
file_path = str(file_path)
sftp = self.get_sftp()
return sftp.file(file_path, mode="rb").read()
@convert_paramiko_errors
def write_file(self, file_path, contents, mode=None, owner=None,
group=None):
file_path = str(file_path)
sftp = self.get_sftp()
f = sftp.file(file_path, mode="wb")
if mode is not None:
sftp.chmod(file_path, mode)
if (owner is not None) or (group is not None):
# set owner and group
file_stat = sftp.stat(file_path)
sftp.chown(file_path,
owner if (owner is not None) else file_stat.st_uid,
group if (group is not None) else file_stat.st_gid)
f.write(contents)
f.close()
def close(self):
if self._sftp:
self._sftp.close()
self._sftp = None
if self._ssh:
self._ssh.close()
self._ssh = None
def get_ssh(self, action=None):
host = self.node.get("host")
user = self.node.get("user")
password = self.node.get("password")
port = int(self.node.get("ssh-port", os.environ.get("PONI_SSH_PORT", 22)))
if not host:
raise errors.RemoteError("%s: 'host' property not defined" % (
self.node.name))
elif not user:
raise errors.RemoteError("%s: 'user' property not defined" % (
self.node.name))
if self.key_filename:
key_file = self.key_filename
if not os.path.isabs(key_file):
key_file = "%s/.ssh/%s" % (os.environ.get("HOME"),
key_file)
else:
key_file = None
self.log.debug("ssh connect: host=%s, port=%r, user=%s, key=%s",
host, port, user, key_file)
end_time = time.time() + self.connect_timeout
while time.time() < end_time:
try:
if not self._ssh:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, port=port, username=user, key_filename=key_file, password=password)
self._ssh = ssh
return action(self._ssh) if action else self._ssh
except (socket.error, paramiko.SSHException) as error:
remaining = max(0, end_time - time.time())
self.log.warning("%s: ssh connection to %s failed: %s: %s, "
"retry time remaining=%.0fs",
self.node.name, host,
error.__class__.__name__, error, remaining)
self._ssh = None
time.sleep(2.5)
raise errors.RemoteError("%s: ssh connect failed: %s: %s" % (
self.node.name, error.__class__.__name__, error))
@convert_paramiko_errors
def execute_command(self, cmd, pseudo_tty=False):
def get_channel(ssh):
channel = ssh.get_transport().open_session()
if not channel:
raise paramiko.SSHException("channel opening failed")
return channel
channel = self.get_ssh(get_channel)
if not channel:
raise errors.RemoteError("failed to open an SSH session to %s" % (
self.node.name))
if pseudo_tty:
channel.get_pty()
channel.set_combine_stderr(True) # TODO: separate stdout/stderr?
BS = 2**16
rx_time = time.time()
log_name = "%s (%s): %r" % (self.node.name, self.node.get("host"), cmd)
next_warn = time.time() + self.warn_timeout
next_ping = time.time() + self.ping_interval
def available_output():
"""read all the output that is immediately available"""
while channel.recv_ready():
chunk = channel.recv(BS)
yield rcontrol.STDOUT, chunk
channel.exec_command(cmd)
channel.shutdown_write()
exit_code = None
if epoll:
poll = select.epoll()
poll.register(channel.fileno(), select.EPOLLIN)
else:
poll = None
try:
while True:
if (exit_code is None) and channel.exit_status_ready():
# process has finished executing, but there still may be
# output to read from stdout or stderr
exit_code = channel.recv_exit_status()
# wait for input, note that the results are not used for anything
if poll:
try:
poll.poll(timeout=1.0) # just poll, not interested in the fileno
except IOError as ex:
if ex.errno != errno.EINTR:
raise
continue
else:
select.select([channel], [], [], 1.0)
for output in available_output():
rx_time = time.time()
next_warn = time.time() + self.warn_timeout
yield output
if channel.closed and (exit_code is not None):
yield rcontrol.DONE, exit_code
break # everything done!
now = time.time()
if now > (rx_time + self.terminate_timeout):
# no output in a long time, terminate connection
raise errors.RemoteError(
"%s: no output in %.1f seconds, terminating" % (
log_name, self.terminate_timeout))
if now > next_warn:
elapsed_since = time.time() - rx_time
self.log.warning("%s: no output in %.1fs", log_name,
elapsed_since)
next_warn = time.time() + self.warn_timeout
if now > next_ping:
channel.transport.send_ignore()
next_ping = time.time() + self.ping_interval
finally:
if poll:
poll.close()
@convert_paramiko_errors
def execute_shell(self):
def invoke_shell(ssh):
# TODO: get dimensions from `stty size` or something like that
return ssh.invoke_shell(term='vt100', width=80, height=24)
        channel = None
        try:
            channel = self.get_ssh(invoke_shell)
            interactive_shell(channel)
        finally:
            # get_ssh() may raise before `channel` is assigned
            if channel:
                channel.close()
@convert_paramiko_errors
def stat(self, file_path):
file_path = str(file_path)
sftp = self.get_sftp()
return sftp.stat(file_path)
@convert_paramiko_errors
def put_file(self, source_path, dest_path, callback=None):
source_path = str(source_path)
dest_path = str(dest_path)
sftp = self.get_sftp()
sftp.put(source_path, dest_path, callback=callback)
@convert_paramiko_errors
def makedirs(self, dir_path):
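        """Create ``dir_path`` and any missing parents over SFTP (roughly
        ``mkdir -p``): walk upwards until an existing directory is found,
        then create the missing components from the top down."""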
sftp = self.get_sftp()
create_dirs = []
        while True:
try:
sftp.stat(dir_path)
break # dir exists
except (paramiko.SSHException, IOError):
create_dirs.insert(0, dir_path)
dir_path, rest = os.path.split(dir_path)
if not dir_path or not rest:
break
for dir_path in create_dirs:
sftp.mkdir(dir_path)
@convert_paramiko_errors
def utime(self, file_path, times):
sftp = self.get_sftp()
sftp.utime(str(file_path), times)
| 34.068404 | 105 | 0.536571 |
4a27465b409074b2c4b003a44020bc92d0679e91 | 114,353 | py | Python | tools/python/transform/transformer.py | juierror/mace | a2bcf2c98b410f75d38f2291585b0ad11b36d068 | ["Apache-2.0"] | 1 | 2020-07-22T08:12:24.000Z | 2020-07-22T08:12:24.000Z | tools/python/transform/transformer.py | deadzq/mace | aec8421951390ee1dfec4db5c118805908776330 | ["Apache-2.0"] | null | null | null | tools/python/transform/transformer.py | deadzq/mace | aec8421951390ee1dfec4db5c118805908776330 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import six
from py_proto import mace_pb2
from transform import base_converter
from transform.base_converter import ActivationType
from transform.base_converter import ConverterUtil
from transform.base_converter import DataFormat
from transform.base_converter import DeviceType
from transform.base_converter import EltwiseType
from transform.base_converter import FrameworkType
from transform.base_converter import MaceKeyword
from transform.base_converter import MaceOp
from transform.base_converter import MaceFixedDataFormatOps # noqa
from transform.base_converter import MaceTransposableDataFormatOps # noqa
from transform.base_converter import PaddingMode
from transform.base_converter import ReduceType
from transform.base_converter import TransformerRule
from quantize import quantize_util
from utils.util import mace_check
class Transformer(base_converter.ConverterInterface):
"""A class for transform naive mace model to optimized model.
This Transformer should be platform irrelevant. So, do not assume
tensor name has suffix like ':0".
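    A rough usage sketch (the ``option`` and ``model`` arguments come from
    the surrounding converter pipeline; the names below are illustrative):
        transformer = Transformer(option, net_def)
        net_def, quantize_activation_info = transformer.run()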
"""
def __init__(self, option, model):
# Dependencies
# (TRANSFORM_MATMUL_TO_FC, TRANSFORM_GLOBAL_CONV_TO_FC) -> RESHAPE_FC_WEIGHT # noqa
self._registered_transformers = {
TransformerRule.TRANSFORM_FAKE_QUANTIZE:
self.transform_fake_quantize,
TransformerRule.REMOVE_USELESS_OP: self.remove_useless_op,
TransformerRule.TRANSFORM_GLOBAL_POOLING:
self.transform_global_pooling,
TransformerRule.TRANSFORM_LSTMCELL_ZEROSTATE:
self.transform_lstmcell_zerostate,
TransformerRule.TRANSFORM_BASIC_LSTMCELL:
self.transform_basic_lstmcell,
TransformerRule.FOLD_RESHAPE: self.fold_reshape,
TransformerRule.TRANSFORM_MATMUL_TO_FC:
self.transform_matmul_to_fc,
TransformerRule.FOLD_BATCHNORM: self.fold_batchnorm,
TransformerRule.FOLD_BIASADD: self.fold_biasadd,
TransformerRule.FOLD_CONV_AND_BN:
self.fold_conv_and_bn, # data_format related
TransformerRule.FOLD_DECONV_AND_BN:
self.fold_deconv_and_bn, # data_format related
TransformerRule.FOLD_DEPTHWISE_CONV_AND_BN:
self.fold_depthwise_conv_and_bn, # data_format related
TransformerRule.TRANSFORM_ADD_TO_BIASADD:
self.transform_add_to_biasadd,
TransformerRule.REARRANGE_BATCH_TO_SPACE:
self.rearrange_batch_to_space,
TransformerRule.FLATTEN_ATROUS_CONV: self.flatten_atrous_conv,
TransformerRule.FOLD_ACTIVATION: self.fold_activation,
TransformerRule.FOLD_SQRDIFF_MEAN: self.fold_squared_diff_mean,
TransformerRule.FOLD_EMBEDDING_LOOKUP: self.fold_embedding_lookup,
TransformerRule.TRANSPOSE_FILTERS: self.transpose_filters,
TransformerRule.TRANSPOSE_MATMUL_WEIGHT:
self.transpose_matmul_weight,
TransformerRule.FOLD_FC_RESHAPE:
self.fold_fc_reshape,
TransformerRule.ADD_IN_OUT_TENSOR_INFO:
self.add_in_out_tensor_info,
TransformerRule.ADD_WINOGRAD_ARG: self.add_winograd_arg,
TransformerRule.TRANSFORM_GLOBAL_CONV_TO_FC:
self.transform_global_conv_to_fc,
TransformerRule.RESHAPE_FC_WEIGHT: self.reshape_fc_weight,
TransformerRule.QUANTIZE_NODES:
self.quantize_nodes,
TransformerRule.ADD_QUANTIZE_TENSOR_RANGE:
self.add_quantize_tensor_range,
TransformerRule.QUANTIZE_WEIGHTS:
self.quantize_weights,
TransformerRule.UPDATE_FLOAT_OP_DATA_TYPE:
self.update_float_op_data_type,
TransformerRule.ADD_OPENCL_INFORMATIONS:
self.add_opencl_informations,
TransformerRule.SORT_BY_EXECUTION: self.sort_by_execution,
TransformerRule.UPDATE_DATA_FORMAT: self.update_data_format,
TransformerRule.TRANSPOSE_RESHAPE_AND_FLATTEN:
self.transform_reshape_and_flatten,
TransformerRule.TRANSPOSE_SHAPE_TENSOR_TO_PARAM:
self.transform_shape_tensor_to_param,
TransformerRule.TRANSPOSE_DATA_FORMAT: self.transpose_data_format,
TransformerRule.CHECK_QUANTIZE_INFO:
self.check_quantize_info,
TransformerRule.TRANSFORM_CHANNEL_SHUFFLE:
self.transform_channel_shuffle,
TransformerRule.QUANTIZE_SPECIFIC_OPS_ONLY:
self.quantize_specific_ops_only,
TransformerRule.FP16_MATMUL_WEIGHT:
self.fp16_matmul_weight,
TransformerRule.FP16_GATHER_WEIGHT:
self.fp16_gather_weight,
TransformerRule.QUANTIZE_LARGE_WEIGHTS:
self.quantize_large_weights,
}
self._option = option
self._model = model
self._wino_arg = self._option.winograd
self._ops = {}
self._consts = {}
self._consumers = {}
self._producer = {}
self._quantize_activation_info = {}
self._quantized_tensor = set()
self.input_name_map = {}
self.output_name_map = {}
self.initialize_name_map()
def run(self):
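        """Apply each enabled transformer rule repeatedly until it makes no
        further change, then return the transformed model together with the
        collected activation quantization info."""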
for key in self._option.transformer_option:
transformer = self._registered_transformers[key]
while True:
self.construct_ops_and_consumers(key)
changed = transformer()
if not changed:
break
return self._model, self._quantize_activation_info
def initialize_name_map(self):
for input_node in self._option.input_nodes.values():
new_input_name = MaceKeyword.mace_input_node_name \
+ '_' + input_node.name
self.input_name_map[input_node.name] = new_input_name
output_nodes = self._option.check_nodes.values()
for output_node in output_nodes:
new_output_name = MaceKeyword.mace_output_node_name \
+ '_' + output_node.name
self.output_name_map[output_node.name] = new_output_name
def filter_format(self):
filter_format_value = ConverterUtil.get_arg(self._model,
MaceKeyword.mace_filter_format_str).i # noqa
filter_format = None
if filter_format_value == DataFormat.HWIO.value:
filter_format = DataFormat.HWIO
elif filter_format_value == DataFormat.OIHW.value:
filter_format = DataFormat.OIHW
elif filter_format_value == DataFormat.HWOI.value:
filter_format = DataFormat.HWOI
else:
mace_check(False, "filter format %d not supported" %
filter_format_value)
return filter_format
def set_filter_format(self, filter_format):
arg = ConverterUtil.get_arg(self._model,
MaceKeyword.mace_filter_format_str)
arg.i = filter_format.value
def construct_ops_and_consumers(self, key):
self._ops.clear()
self._consumers.clear()
self._producer.clear()
for op in self._model.op:
self._ops[op.name] = op
for tensor in self._model.tensors:
self._consts[tensor.name] = tensor
for op in self._ops.values():
for input_tensor in op.input:
if input_tensor not in self._consumers:
self._consumers[input_tensor] = []
self._consumers[input_tensor].append(op)
for output_tensor in op.output:
self._producer[output_tensor] = op
if key != TransformerRule.SORT_BY_EXECUTION:
for input_node in self._option.input_nodes.values():
input_node_existed = False
for op in self._model.op:
if input_node.name in op.output:
input_node_existed = True
break
if not input_node_existed:
op = mace_pb2.OperatorDef()
op.name = self.normalize_op_name(input_node.name)
op.type = "Input"
data_type_arg = op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = input_node.data_type
op.output.extend([input_node.name])
output_shape = op.output_shape.add()
output_shape.dims.extend(input_node.shape)
if input_node.data_format != DataFormat.NONE:
if input_node.data_format == DataFormat.NCHW:
self.transpose_shape(output_shape.dims,
[0, 3, 1, 2])
ConverterUtil.add_data_format_arg(op,
DataFormat.AUTO)
else:
ConverterUtil.add_data_format_arg(op,
DataFormat.NONE)
self._producer[op.output[0]] = op
@staticmethod
def replace(obj_list, source, target):
for i in six.moves.range(len(obj_list)):
if obj_list[i] == source:
obj_list[i] = target
@staticmethod
def transpose_shape(shape, order):
transposed_shape = []
for i in six.moves.range(len(order)):
transposed_shape.append(shape[order[i]])
shape[:] = transposed_shape[:]
@staticmethod
def normalize_op_name(name):
return name.replace(':', '_')
def get_tensor_shape(self, tensor):
if tensor in self._consts:
return list(self._consts[tensor].dims)
elif tensor in self._producer:
producer = self._producer[tensor]
for i in six.moves.range(len(producer.output)):
if producer.output[i] == tensor:
return list(producer.output_shape[i].dims)
else:
return None
def get_tensor_data_type(self, tensor):
if tensor in self._consts:
return self._consts[tensor].data_type
elif tensor in self._producer:
producer = self._producer[tensor]
for i in six.moves.range(len(producer.output)):
if producer.output[i] == tensor:
if i < len(producer.output_type):
return producer.output_type[i]
elif ConverterUtil.get_arg(producer, "T") is not None:
return ConverterUtil.get_arg(producer, "T").i
else:
print("No data type filled: ", producer)
return None
else:
return None
def get_tensor_data_format(self, tensor):
if tensor in self._producer:
producer = self._producer[tensor]
return ConverterUtil.data_format(producer)
else:
return DataFormat.NONE
def consumer_count(self, tensor_name):
return len(self._consumers.get(tensor_name, []))
def is_op_output_node(self, op):
output_node_tensor_names = [out for out in
self._option.output_nodes]
for output in op.output:
if output in output_node_tensor_names:
return True
return False
def safe_remove_node(self, op, replace_op, remove_input_tensor=False):
"""remove op.
1. change the inputs of its consumers to the outputs of replace_op
2. if the op is output node, change output node to replace op"""
if replace_op is None:
# When no replace op specified, we change the inputs of
# its consumers to the input of the op. This handles the case
# that the op is identity op and its input is a tensor.
            mace_check(len(op.output) == 1 and len(op.input) == 1,
                       "cannot remove an op without a replace op specified "
                       "when its input/output length > 1\n" + str(op))
for consumer_op in self._consumers.get(op.output[0], []):
self.replace(consumer_op.input, op.output[0], op.input[0])
mace_check(op.output[0] not in self._option.output_nodes,
"cannot remove op that is output node")
else:
mace_check(len(op.output) == len(replace_op.output),
"cannot remove op since len(op.output) "
"!= len(replace_op.output)")
for i in six.moves.range(len(op.output)):
# if the op is output node, change replace_op output name
# to the op output name
if op.output[i] in self._option.output_nodes:
for consumer in self._consumers.get(
replace_op.output[i], []):
self.replace(consumer.input,
replace_op.output[i],
op.output[i])
replace_op.output[i] = op.output[i]
else:
for consumer_op in self._consumers.get(op.output[i], []):
self.replace(consumer_op.input,
op.output[i],
replace_op.output[i])
if remove_input_tensor:
for input_name in op.input:
if input_name in self._consts:
const_tensor = self._consts[input_name]
self._model.tensors.remove(const_tensor)
self._model.op.remove(op)
def add_in_out_tensor_info(self):
net = self._model
for input_node in self._option.input_nodes.values():
input_info = net.input_info.add()
input_info.name = input_node.name
input_info.data_format = input_node.data_format.value
input_info.dims.extend(input_node.shape)
input_info.data_type = input_node.data_type
output_nodes = self._option.check_nodes.values()
for output_node in output_nodes:
output_info = net.output_info.add()
output_info.name = output_node.name
output_info.data_format = output_node.data_format.value
output_info.dims.extend(
self._producer[output_node.name].output_shape[0].dims)
output_info.data_type = output_node.data_type
return False
def remove_useless_op(self):
net = self._model
for op in net.op:
if op.type == 'Identity':
print("Remove useless op: %s(%s)" % (op.name, op.type))
self.safe_remove_node(op,
self._producer.get(op.input[0], None))
return True
elif op.type == 'Reshape' and \
op.output_shape[0].dims == \
self.get_tensor_shape(op.input[0]):
print("Remove useless reshape: %s(%s)" % (op.name, op.type))
self.safe_remove_node(op,
self._producer.get(op.input[0], None))
return True
return False
def transform_global_pooling(self):
net = self._model
for op in net.op:
if op.type == MaceOp.Pooling.name and \
ConverterUtil.get_arg(op,
MaceKeyword.mace_global_pooling_str) is not None: # noqa
print("Transform global pooling: %s(%s)" % (op.name, op.type))
input_shape = self._producer[op.input[0]].output_shape[0].dims
if ConverterUtil.data_format(op) == DataFormat.NHWC:
kernel_shape = input_shape[1:3]
else:
kernel_shape = input_shape[2:4]
ConverterUtil.get_arg(op,
MaceKeyword.mace_kernel_str).ints[:] \
= kernel_shape[:]
return False
def fold_batchnorm(self):
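        # Fold the decomposed BatchNorm pattern
        # `Eltwise(PROD, x, scale)` + `Eltwise(SUM)/BiasAdd(..., offset)`
        # back into a single BatchNorm(input, scale, offset) op.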
net = self._model
for op in net.op:
if (op.type == MaceOp.Eltwise.name
and ConverterUtil.get_arg(
op, MaceKeyword.mace_element_type_str).i
== EltwiseType.PROD.value) \
and len(op.input) == 2 \
and op.input[1] in self._consts \
and op.output_shape[0].dims[-1:] == \
self._consts[op.input[1]].dims \
and self.consumer_count(op.output[0]) == 1 \
and not self.is_op_output_node(op):
consumer_op = self._consumers[op.output[0]][0]
if (consumer_op.type == MaceOp.Eltwise.name
and ConverterUtil.get_arg(
consumer_op, MaceKeyword.mace_element_type_str).i
== EltwiseType.SUM.value
or consumer_op.type == MaceOp.BiasAdd.name) \
and len(consumer_op.input) == 2 \
and consumer_op.input[1] in self._consts \
and len(self._consts[consumer_op.input[1]].dims) == 1:
print("Fold batchnorm: %s(%s)" % (op.name, op.type))
consumer_op.type = MaceOp.BatchNorm.name
consumer_op.input[:] = [op.input[0], op.input[1],
consumer_op.input[1]]
net.op.remove(op)
return True
return False
def fold_squared_diff_mean(self):
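        # Fold `Eltwise(SQR_DIFF)` followed by Reduce(MEAN) over the spatial
        # axes [1, 2] (with keep_dims) into a single SqrDiffMean op.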
net = self._model
for op in net.op:
if op.type == MaceOp.Eltwise.name and len(op.input) == 2:
elt_type = ConverterUtil.get_arg(
op,
MaceKeyword.mace_element_type_str).i
if elt_type == EltwiseType.SQR_DIFF.value and\
self.consumer_count(op.output[0]) == 1:
consumer_op = self._consumers[op.output[0]][0]
if consumer_op.type == MaceOp.Reduce.name:
axis = ConverterUtil.get_arg(
consumer_op,
MaceKeyword.mace_axis_str).ints
keep_dims = ConverterUtil.get_arg(
consumer_op,
MaceKeyword.mace_keepdims_str).i
reduce_type = ConverterUtil.get_arg(
consumer_op,
MaceKeyword.mace_reduce_type_str).i
if reduce_type == ReduceType.MEAN.value and\
len(consumer_op.input) == 1 and\
axis[0] == 1 and axis[1] == 2 and\
keep_dims > 0:
print("Fold SquaredDiff Reduce: %s" % op.name)
op.type = MaceOp.SqrDiffMean.name
op.output[0] = consumer_op.output[0]
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
def fold_embedding_lookup(self):
net = self._model
for op in net.op:
# gather -> mul
if (op.type == MaceOp.Gather.name and
self.consumer_count(op.output[0]) == 1):
consumer_op = self._consumers[op.output[0]][0]
if (consumer_op.type == MaceOp.Eltwise.name and
ConverterUtil.get_arg(consumer_op,
MaceKeyword.mace_element_type_str).i == EltwiseType.PROD.value and # noqa
len(consumer_op.input) == 1 and
op.input[0] in self._consts and
self.consumer_count(op.input[0]) == 1):
print("Fold Gather and Mul: %s" % op.name)
gather_weights = self._consts[op.input[0]]
mul_weight = ConverterUtil.get_arg(consumer_op,
MaceKeyword.mace_scalar_input_str).f # noqa
gather_weights.float_data[:] = [float_data * mul_weight for float_data in gather_weights.float_data] # noqa
self.safe_remove_node(consumer_op, None,
remove_input_tensor=True)
def transform_lstmcell_zerostate(self):
net = self._model
zero_state_pattern = \
re.compile(r'^.*BasicLSTMCellZeroState_?[0-9]*/[a-zA-Z]+_?[0-9]*') # noqa
for op in net.op:
if op.type == MaceOp.Fill.name and \
zero_state_pattern.match(op.name):
print("Transform lstm zerostate")
concat_op = self._producer[op.input[0]]
consumer_op = self._consumers[op.output[0]][0]
dims = [self._consts[concat_op.input[0]].int32_data[0],
self._consts[concat_op.input[1]].int32_data[0]]
tensor_def = net.tensors.add()
tensor_def.name = op.output[0].replace('/zeros', '/init_const')
tensor_def.dims.extend(dims)
tensor_def.data_type = self._consts[op.input[1]].data_type
tensor_def.float_data.extend(
[self._consts[op.input[1]].float_data[0]] *
(dims[0] * dims[1]))
for i in range(len(consumer_op.input)):
if zero_state_pattern.match(consumer_op.input[i][:-2]):
consumer_op.input[i] = tensor_def.name
net.tensors.remove(self._consts[op.input[1]])
net.tensors.remove(self._consts[concat_op.input[0]])
net.tensors.remove(self._consts[concat_op.input[1]])
net.op.remove(concat_op)
net.op.remove(op)
return True
def transform_basic_lstmcell(self):
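        # GPU only: collapse the unrolled BasicLSTMCell subgraph
        # (Concat -> MatMul -> BiasAdd -> Split -> gate activations -> muls)
        # into a single LSTMCell op and delete the intermediate ops.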
if self._option.device != DeviceType.GPU.value:
return False
net = self._model
basic_lstm_concat_pattern = \
re.compile(r'^.*basic_lstm_cell_?[0-9]*/concat_?[0-9]*')
for op in net.op:
if op.type == MaceOp.Concat.name and \
basic_lstm_concat_pattern.match(op.name):
print("Transform basic lstmcell")
ops_to_delete = []
ops_to_delete.extend([op])
op_def = net.op.add()
op_def.name = op.name.replace('/concat', '/folded_lstmcell')
op_def.type = MaceOp.LSTMCell.name
op_def.arg.extend(op.arg[:-1])
# Concat pre output and cur input
# extend concat inputs
op_def.input.extend([op_input for op_input in op.input])
# lstm MatMul in FC of [pre_output, cur_input]
matmul_op = self._consumers[op.output[0]][0]
ops_to_delete.extend([matmul_op])
# extend MatMul weight input
op_def.input.extend([matmul_op.input[1]])
# lstm BiasAdd in FC of [pre_output, cur_input]
biasadd_op = self._consumers[matmul_op.output[0]][0]
ops_to_delete.extend([biasadd_op])
# extend BiasAdd bias input
op_def.input.extend([biasadd_op.input[1]])
# Split FC output into i, j, f, o
# i = input_gate, j = new_input, f = forget_gate, o = output_gate # noqa
split_op = self._consumers[biasadd_op.output[0]][0]
ops_to_delete.extend([split_op])
# input gate activation
input_gate_op = self._consumers[split_op.output[0]][0]
ops_to_delete.extend([input_gate_op])
# new input gate
new_input_tanh_op = self._consumers[split_op.output[1]][0]
ops_to_delete.extend([new_input_tanh_op])
# forget gate add
forget_add_op = self._consumers[split_op.output[2]][0]
ops_to_delete.extend([forget_add_op])
# output gate activation
output_gate_op = self._consumers[split_op.output[3]][0]
ops_to_delete.extend([output_gate_op])
# extend forget add
mace_check(len(forget_add_op.input) == 1,
'Wrong LSTM format in forget gate inputs')
for arg in forget_add_op.arg:
if arg.name == MaceKeyword.mace_scalar_input_str:
op_def.arg.extend([arg])
# state remember
remember_mul_op = self._consumers[input_gate_op.output[0]][0]
ops_to_delete.extend([remember_mul_op])
mace_check(remember_mul_op.name == self._consumers[
new_input_tanh_op.output[0]][0].name,
'Wrong LSTM format in input sig & input tanh mul')
# forget gate activation
forget_gate_op = self._consumers[forget_add_op.output[0]][0]
ops_to_delete.extend([forget_gate_op])
# Mul `forget` & `pre cell state`
forget_mul_op = self._consumers[forget_gate_op.output[0]][0]
ops_to_delete.extend([forget_mul_op])
# extend pre cell state input
op_def.input.extend([forget_mul_op.input[0]])
# get cur cell state
# Add `forget gate output` & `remember mul output`
remember_forget_add_op = \
self._consumers[remember_mul_op.output[0]][0]
ops_to_delete.extend([remember_forget_add_op])
mace_check(remember_forget_add_op.name ==
self._consumers[forget_mul_op.output[0]][0].name,
'Wrong LSTM format in add forget gate & remember mul') # noqa
op_def.output.extend([remember_forget_add_op.output[0]])
op_def.output_shape.extend(remember_forget_add_op.output_shape)
# cell state output tanh
for consumer in \
self._consumers[remember_forget_add_op.output[0]]:
if consumer.type == MaceOp.Activation.name and \
consumer.name.find('basic_lstm_cell') > 0:
cell_tanh_op = consumer
ops_to_delete.extend([cell_tanh_op])
# final mul, get output
final_mul_op = self._consumers[cell_tanh_op.output[0]][0]
ops_to_delete.extend([final_mul_op])
mace_check(final_mul_op.name ==
self._consumers[output_gate_op.output[0]][0].name,
'Wrong LSTM format in final mul')
op_def.output.extend([final_mul_op.output[0]])
op_def.output_shape.extend(final_mul_op.output_shape)
for op_to_del in ops_to_delete:
net.op.remove(op_to_del)
return True
return False
def fold_conv_and_bn(self):
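        # Fold a BatchNorm that directly follows a Conv2D into the conv:
        # the scale is multiplied into the filter (and any existing bias)
        # per output channel, and the offset is absorbed into the bias.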
net = self._model
for op in net.op:
if (op.type == MaceOp.Conv2D.name) \
and self.consumer_count(op.output[0]) == 1:
consumer_op = self._consumers[op.output[0]][0]
input_len = len(op.input)
if (consumer_op.type == MaceOp.BatchNorm.name
and (input_len == 2 or (input_len == 3 and op.input[-1] in self._consts)) # noqa
and len(self._consumers[op.input[1]]) == 1):
print("Fold conv and bn: %s(%s)" % (op.name, op.type))
filter = self._consts[op.input[1]]
scale = self._consts[consumer_op.input[1]]
offset = self._consts[consumer_op.input[2]]
idx = 0
filter_format = self.filter_format()
if filter_format == DataFormat.HWIO:
for hwi in six.moves.range(filter.dims[0]
* filter.dims[1]
* filter.dims[2]):
for o in six.moves.range(filter.dims[3]):
filter.float_data[idx] *= scale.float_data[o]
idx += 1
elif filter_format == DataFormat.OIHW:
for o in six.moves.range(filter.dims[0]):
for hwi in six.moves.range(filter.dims[1]
* filter.dims[2]
* filter.dims[3]):
filter.float_data[idx] *= scale.float_data[o]
idx += 1
else:
mace_check(False, "filter format %s not supported" %
filter_format)
if len(op.input) == 3:
conv_bias = self._consts[op.input[2]]
for c in six.moves.range(conv_bias.dims[0]):
conv_bias.float_data[c] *= scale.float_data[c]
conv_bias.float_data[c] += offset.float_data[c]
net.tensors.remove(offset)
else:
op.input.extend([consumer_op.input[2]])
# remove bn
del consumer_op.input[:]
net.tensors.remove(scale)
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
def fold_deconv_and_bn(self):
net = self._model
for op in net.op:
if (op.type in [MaceOp.Deconv2D.name, MaceOp.DepthwiseDeconv2d]) \
and self.consumer_count(op.output[0]) == 1:
consumer_op = self._consumers[op.output[0]][0]
framework = ConverterUtil.get_arg(
op, MaceKeyword.mace_framework_type_str).i
input_len = len(op.input)
if (consumer_op.type == MaceOp.BatchNorm.name and (
(framework == FrameworkType.CAFFE.value and
(input_len == 2 or (input_len == 3 and
op.input[-1] in self._consts))) or
(framework == FrameworkType.TENSORFLOW.value and
(input_len == 3 or (input_len == 4 and
op.input[-1] in self._consts))))
and len(self._consumers[op.input[1]]) == 1):
print("Fold deconv and bn: %s(%s)" % (op.name, op.type))
filter = self._consts[op.input[1]]
scale = self._consts[consumer_op.input[1]]
offset = self._consts[consumer_op.input[2]]
idx = 0
filter_format = self.filter_format()
                    # in deconv ops the O and I channels are switched
if filter_format == DataFormat.HWIO:
for hw in six.moves.range(filter.dims[0]
* filter.dims[1]):
for o in six.moves.range(filter.dims[2]):
for i in six.moves.range(filter.dims[3]):
filter.float_data[idx] *=\
scale.float_data[o]
idx += 1
elif filter_format == DataFormat.OIHW:
for i in six.moves.range(filter.dims[0]):
for o in six.moves.range(filter.dims[1]):
for hw in six.moves.range(filter.dims[2]
* filter.dims[3]):
filter.float_data[idx] *=\
scale.float_data[o]
idx += 1
else:
mace_check(False, "filter format %s not supported" %
filter_format)
bias_dim = -1
if framework == FrameworkType.CAFFE.value \
and len(op.input) == 3:
bias_dim = 2
if framework == FrameworkType.TENSORFLOW.value \
and len(op.input) == 4:
bias_dim = 3
if bias_dim != -1:
conv_bias = self._consts[op.input[bias_dim]]
for c in six.moves.range(conv_bias.dims[0]):
conv_bias.float_data[c] *= scale.float_data[c]
conv_bias.float_data[c] += offset.float_data[c]
net.tensors.remove(offset)
else:
op.input.extend([consumer_op.input[2]])
del consumer_op.input[:]
net.tensors.remove(scale)
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
def fold_depthwise_conv_and_bn(self):
net = self._model
for op in net.op:
if op.type == MaceOp.DepthwiseConv2d.name \
and self.consumer_count(op.output[0]) == 1:
consumer_op = self._consumers[op.output[0]][0]
input_len = len(op.input)
if (consumer_op.type == MaceOp.BatchNorm.name
and (input_len == 2 or (input_len == 3 and op.input[-1] in self._consts)) # noqa
and len(self._consumers[op.input[1]]) == 1):
print("Fold depthwise conv and bn: %s(%s)"
% (op.name, op.type))
filter = self._consts[op.input[1]]
scale = self._consts[consumer_op.input[1]]
offset = self._consts[consumer_op.input[2]]
idx = 0
filter_format = self.filter_format()
if filter_format == DataFormat.HWIO:
for hw in six.moves.range(filter.dims[0]
* filter.dims[1]):
for i in six.moves.range(filter.dims[2]):
for o in six.moves.range(filter.dims[3]):
filter.float_data[idx] *= scale.float_data[
i * filter.dims[3] + o]
idx += 1
elif filter_format == DataFormat.OIHW:
for o in six.moves.range(filter.dims[0]):
for i in six.moves.range(filter.dims[1]):
for hw in six.moves.range(filter.dims[2]
* filter.dims[3]):
filter.float_data[idx] *= scale.float_data[
i * filter.dims[0] + o]
idx += 1
else:
mace_check(False, "filter format %s not supported" %
filter_format)
if len(op.input) == 3:
conv_bias = self._consts[op.input[2]]
for c in six.moves.range(conv_bias.dims[0]):
conv_bias.float_data[c] *= scale.float_data[c]
conv_bias.float_data[c] += offset.float_data[c]
net.tensors.remove(offset)
else:
op.input.extend([consumer_op.input[2]])
# remove bn
del consumer_op.input[:]
net.tensors.remove(scale)
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
@staticmethod
def sort_feature_map_shape(shape, data_format):
"""Return shape in NHWC order"""
batch = shape[0]
if data_format == DataFormat.NHWC:
height = shape[1]
width = shape[2]
channels = shape[3]
else:
height = shape[2]
width = shape[3]
channels = shape[1]
return batch, height, width, channels
@staticmethod
def sort_filter_shape(filter_shape, filter_format):
"""Return filter shape in HWIO order"""
if filter_format == DataFormat.HWIO:
filter_height = filter_shape[0]
filter_width = filter_shape[1]
in_channels = filter_shape[2]
out_channels = filter_shape[3]
elif filter_format == DataFormat.OIHW:
filter_height = filter_shape[2]
filter_width = filter_shape[3]
in_channels = filter_shape[1]
out_channels = filter_shape[0]
elif filter_format == DataFormat.HWOI:
filter_height = filter_shape[0]
filter_width = filter_shape[1]
in_channels = filter_shape[3]
out_channels = filter_shape[2]
else:
mace_check(False, "filter format %s not supported" % filter_format)
return filter_height, filter_width, in_channels, out_channels
def transform_add_to_biasadd(self):
net = self._model
for op in net.op:
if (op.type == 'Eltwise'
and ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.SUM.value # noqa
and len(op.input) == 2
and op.input[1] in self._consts
and len(self._consts[op.input[1]].dims) == 1):
print("Transform add to biasadd: %s(%s)" % (op.name, op.type))
op.type = MaceOp.BiasAdd.name
return True
return False
def replace_quantize_info(self, op, replace_op):
if len(replace_op.quantize_info) > 0:
del op.quantize_info[:]
op.quantize_info.extend(replace_op.quantize_info)
for i in range(len(op.quantize_info)):
self._quantize_activation_info[op.output[i]] = \
op.quantize_info[i]
def fold_biasadd(self):
net = self._model
for op in net.op:
if (((op.type == MaceOp.Conv2D.name
or op.type == MaceOp.DepthwiseConv2d.name
or op.type == MaceOp.FullyConnected.name)
and len(op.input) == 2)
or (op.type == MaceOp.Deconv2D.name
and ((ConverterUtil.get_arg(
op,
MaceKeyword.mace_framework_type_str).i ==
FrameworkType.CAFFE.value
and len(op.input) == 2)
or (ConverterUtil.get_arg(
op,
MaceKeyword.mace_framework_type_str).i
== FrameworkType.TENSORFLOW.value
and len(op.input) == 3)))) \
and len(self._consumers.get(op.output[0], [])) == 1:
consumer_op = self._consumers[op.output[0]][0]
if consumer_op.type == MaceOp.BiasAdd.name:
print("Fold biasadd: %s(%s)" % (op.name, op.type))
op.name = consumer_op.name
op.output[0] = consumer_op.output[0]
op.input.append(consumer_op.input[1])
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
def flatten_atrous_conv(self):
if self._option.device != DeviceType.GPU.value \
and self._option.device != DeviceType.APU.value \
and self._option.device != DeviceType.HTA.value:
return
net = self._model
for op in net.op:
if (op.type == MaceOp.SpaceToBatchND.name
and len(self._consumers.get(op.output[0], [])) == 1):
conv_op = self._consumers.get(op.output[0])[0]
if (conv_op.type == MaceOp.Conv2D.name
or conv_op.type == MaceOp.DepthwiseConv2d.name) \
and len(self._consumers.get(conv_op.output[0], [])) == 1: # noqa
b2s_op = self._consumers.get(conv_op.output[0])[0]
if b2s_op.type == MaceOp.BatchToSpaceND.name:
six.print_("Flatten atrous convolution")
# Add args.
padding_arg_values = ConverterUtil.get_arg(
op,
MaceKeyword.mace_paddings_str).ints
blocks_arg_values = ConverterUtil.get_arg(
b2s_op,
MaceKeyword.mace_space_batch_block_shape_str).ints
dilation_arg = ConverterUtil.get_arg(
conv_op,
MaceKeyword.mace_dilations_str)
if dilation_arg is None:
dilation_arg = conv_op.arg.add()
dilation_arg.name = MaceKeyword.mace_dilations_str
dilation_arg.ints[:] = blocks_arg_values
padding_arg = ConverterUtil.get_arg(
conv_op,
MaceKeyword.mace_padding_str)
if padding_arg is None:
padding_arg = conv_op.arg.add()
padding_arg.name = MaceKeyword.mace_padding_str
if len(padding_arg_values) > 0 \
and padding_arg_values[0] > 0:
padding_arg.i = PaddingMode.SAME.value
else:
padding_arg.i = PaddingMode.VALID.value
strides_arg = ConverterUtil.get_arg(
conv_op,
MaceKeyword.mace_strides_str)
if strides_arg is None:
strides_arg = conv_op.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints[:] = [1, 1]
# update output shape
conv_op.output_shape[0].dims[:] = \
b2s_op.output_shape[0].dims[:]
conv_op.output[0] = b2s_op.output[0]
conv_op.name = b2s_op.name
self.safe_remove_node(op, None)
self.replace_quantize_info(b2s_op, conv_op)
self.safe_remove_node(b2s_op, conv_op)
return True
return False
def fold_activation(self):
net = self._model
for op in net.op:
if (op.type == MaceOp.Conv2D.name
or op.type == MaceOp.Deconv2D.name
or op.type == MaceOp.DepthwiseConv2d.name
or op.type == MaceOp.FullyConnected.name
or op.type == MaceOp.BatchNorm.name) \
and len(self._consumers.get(op.output[0], [])) == 1:
consumer_op = self._consumers[op.output[0]][0]
if consumer_op.type == MaceOp.Activation.name:
act_type_arg = ConverterUtil.get_arg(
consumer_op, MaceKeyword.mace_activation_type_str)
act_type = act_type_arg.s.decode()
if act_type == ActivationType.PRELU.name:
continue
# during quantization, only fold relu/relux
if (self._option.quantize_stat or self._option.quantize) \
and act_type not in [ActivationType.RELU.name,
ActivationType.RELUX.name]:
continue
print("Fold activation: %s(%s)" % (op.name, op.type))
op.name = consumer_op.name
op.output[0] = consumer_op.output[0]
for arg in consumer_op.arg:
if arg.name == MaceKeyword.mace_activation_type_str \
or arg.name == \
MaceKeyword.mace_activation_max_limit_str \
or arg.name == MaceKeyword.mace_activation_leakyrelu_coefficient_str: # noqa
op.arg.extend([arg])
self.replace_quantize_info(op, consumer_op)
self.safe_remove_node(consumer_op, op)
return True
return False
def transform_global_conv_to_fc(self):
"""Transform global conv to fc should be placed after transposing
input/output and filter"""
if self._option.quantize:
return
net = self._model
for op in net.op:
if op.type == MaceOp.Conv2D.name \
and len(op.input) >= 2 \
and op.input[1] in self._consts:
producer = self._producer[op.input[0]]
input_shape = producer.output_shape[0].dims
batch, height, width, channels = self.sort_feature_map_shape(
input_shape, ConverterUtil.data_format(producer))
filter = self._consts[op.input[1]]
filter_shape = filter.dims
filter_height, filter_width, in_channels, out_channels = \
self.sort_filter_shape(filter_shape, self.filter_format())
zero_padding = True
padding_arg = ConverterUtil.get_arg(op,
MaceKeyword.mace_padding_str) # noqa
if padding_arg is not None:
if padding_arg.i != PaddingMode.VALID.value:
zero_padding = False
else:
padding_value_arg = ConverterUtil.get_arg(op,
MaceKeyword.mace_padding_values_str) # noqa
if padding_value_arg is not None:
if not all(v == 0 for v in padding_value_arg.ints):
zero_padding = False
if height == filter_height and width == filter_width \
and zero_padding \
and len(self._consumers[op.input[1]]) == 1:
print("transform global conv to fc %s(%s)"
% (op.name, op.type))
op.type = MaceOp.FullyConnected.name
return False
def reshape_fc_weight(self):
net = self._model
filter_format = self.filter_format()
for op in net.op:
if op.type == MaceOp.FullyConnected.name:
weight = self._consts[op.input[1]]
if len(weight.dims) == 2:
print("Reshape fully connected weight shape")
input_op = self._producer[op.input[0]]
input_shape = list(input_op.output_shape[0].dims)
weight.dims[:] = [weight.dims[0]] + input_shape[1:]
if len(input_shape) == 2:
if filter_format == DataFormat.HWIO:
weight.dims[:] = [1, 1] + weight.dims[:]
elif filter_format == DataFormat.OIHW:
weight.dims[:] = weight.dims[:] + [1, 1]
else:
mace_check(False,
"FC does not support filter format %s" %
filter_format.name)
return False
def add_winograd_arg(self):
if self._wino_arg == 0:
return False
net = self._model
for op in net.op:
if op.type == MaceOp.Conv2D.name:
winograd_arg = op.arg.add()
winograd_arg.name = MaceKeyword.mace_wino_arg_str
winograd_arg.i = self._wino_arg
return False
def transpose_matmul_weight(self):
if self._option.device != DeviceType.CPU.value:
return False
net = self._model
transposed_weights = []
for op in net.op:
if op.type == MaceOp.MatMul.name: # noqa
rhs = op.input[1]
if rhs in self._consts and len(self._consts[rhs].dims) == 2:
arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str) # noqa
# six.print_("Transpose matmul weight %s" % rhs)
if arg is None:
arg = op.arg.add()
arg.name = MaceKeyword.mace_transpose_b_str
arg.i = 0
if arg.i == 0:
arg.i = 1
if rhs not in transposed_weights:
filter = self._consts[rhs]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(1, 0)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_weights.append(rhs)
six.print_('Transpose matmul weight to shape:',
filter.dims)
def transpose_filters(self):
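        # Target filter layout depends on the runtime: OHWI for quantized
        # CPU/APU, HWIO kept for quantized HEXAGON/HTA (deconv filters go
        # HWOI -> OHWI), and OIHW for the remaining float paths.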
net = self._model
filter_format = self.filter_format()
transposed_filter = set()
transposed_deconv_filter = set()
if self._option.quantize and \
(self._option.device == DeviceType.CPU.value or
self._option.device == DeviceType.APU.value):
print("Transpose filters to OHWI")
if filter_format == DataFormat.HWIO:
transpose_order = [3, 0, 1, 2]
elif filter_format == DataFormat.OIHW:
transpose_order = [0, 2, 3, 1]
else:
mace_check(False, "Quantize model does not support conv "
"filter format: %s" % filter_format.name)
for op in net.op:
if (op.type == MaceOp.Conv2D.name or
op.type == MaceOp.Deconv2D.name or
(op.type == MaceOp.DepthwiseConv2d.name and
self._option.device == DeviceType.APU.value) or
(op.type == MaceOp.FullyConnected.name and
len(self._consts[op.input[1]].dims) == 4)) and \
op.input[1] not in transposed_filter:
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(transpose_order)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_filter.add(op.input[1])
elif op.type == MaceOp.DepthwiseConv2d.name and\
filter_format == DataFormat.OIHW and\
self._option.device == DeviceType.CPU.value and\
op.input[1] not in transposed_filter:
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(2, 3, 1, 0)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_filter.add(op.input[1])
            # the deconv filter's output and input channels are swapped
for op in net.op:
if op.type == MaceOp.Deconv2D.name and \
op.input[1] not in transposed_deconv_filter:
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(3, 1, 2, 0)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_deconv_filter.add(op.input[1])
self.set_filter_format(DataFormat.OHWI)
elif self._option.quantize and \
(self._option.device == DeviceType.HEXAGON.value or
self._option.device == DeviceType.HTA.value):
for op in net.op:
# from HWOI to OHWI, deconv is unique
if op.type == MaceOp.Deconv2D.name \
and op.input[1] in self._consts \
and op.input[1] not in transposed_deconv_filter:
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(2, 0, 1, 3)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_deconv_filter.add(op.input[1])
print("Transpose filters to HWIO/HWIM")
mace_check(filter_format == DataFormat.HWIO,
"HEXAGON only support HWIO/HWIM filter format.")
else:
# transpose filter to OIHW/MIHW for tensorflow (HWIO/HWIM)
if filter_format == DataFormat.HWIO:
for op in net.op:
if (op.type == MaceOp.Conv2D.name
or op.type == MaceOp.Deconv2D.name
or op.type == MaceOp.DepthwiseConv2d.name) \
and op.input[1] in self._consts \
and op.input[1] not in transposed_filter:
print("Transpose Conv2D/Deconv2D filters to OIHW/MIHW")
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(3, 2, 0, 1)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_filter.add(op.input[1])
if (op.type == MaceOp.MatMul.name and
(ConverterUtil.get_arg(
op,
MaceKeyword.mace_winograd_filter_transformed)
is not None) # noqa
and op.input[1] not in transposed_filter):
print("Transpose Winograd filters to OIHW/MIHW")
filter = self._consts[op.input[0]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(3, 2, 0, 1)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_filter.add(op.input[0])
if op.type == MaceOp.FullyConnected.name \
and op.input[1] not in transposed_filter:
weight = self._consts[op.input[1]]
if len(weight.dims) == 4:
print("Transpose FullyConnected filters to"
" OIHW/MIHW")
weight_data = np.array(weight.float_data).reshape(
weight.dims)
weight_data = weight_data.transpose(3, 2, 0, 1)
weight.float_data[:] = weight_data.flat
weight.dims[:] = weight_data.shape
transposed_filter.add(op.input[1])
self.set_filter_format(DataFormat.OIHW)
            # the deconv filter's output and input channels are swapped
for op in net.op:
if op.type in [MaceOp.Deconv2D.name,
MaceOp.DepthwiseDeconv2d] \
and op.input[1] not in transposed_deconv_filter:
filter = self._consts[op.input[1]]
filter_data = np.array(filter.float_data).reshape(
filter.dims)
filter_data = filter_data.transpose(1, 0, 2, 3)
filter.float_data[:] = filter_data.flat
filter.dims[:] = filter_data.shape
transposed_deconv_filter.add(op.input[1])
return False
def fold_reshape(self):
net = self._model
for op in net.op:
if op.type == MaceOp.Softmax.name:
# see if possible to fold
# Reshape(xd->2d) + Softmax(2d) [+ Reshape(xd)] to Softmax(xd)
should_fold = False
if op.input[0] in self._producer \
and self._producer[op.input[0]].type \
== MaceOp.Reshape.name \
and len(op.output_shape[0].dims) == 2:
producer = self._producer[op.input[0]]
reshape_input_rank = len(self.get_tensor_shape(
producer.input[0]))
if reshape_input_rank == 4:
should_fold = True
if should_fold:
print(
"Fold reshape and softmax: %s(%s)"
% (op.name, op.type))
producer = self._producer[op.input[0]]
op.output_shape[0].dims[:] = self.get_tensor_shape(
producer.input[0])
if op.output[0] in self._consumers:
consumer = self._consumers[op.output[0]][0]
# if there is a shape op, remove it too
if len(consumer.input) > 1:
if (consumer.input[1] in self._producer
and self._producer[consumer.input[1]].type
== 'Shape'):
self.safe_remove_node(
self._producer[consumer.input[1]], None,
remove_input_tensor=True)
# remove consumer reshape
self.safe_remove_node(consumer, op,
remove_input_tensor=True)
# remove producer reshape
self.safe_remove_node(producer,
self._producer.get(producer.input[0],
None),
remove_input_tensor=True)
return True
return False
def is_after_fc(self, op):
while op.input[0] in self._producer:
producer = self._producer[op.input[0]]
if producer.type in [MaceOp.Activation.name, MaceOp.BiasAdd.name]:
op = producer
continue
elif producer.type == MaceOp.FullyConnected.name:
return True
else:
return False
return False
def transform_matmul_to_fc(self):
net = self._model
filter_format = self.filter_format()
for op in net.op:
            # transform `input(4D) -> reshape(2D) -> matmul` into `fc(2D)`
            # fc output is 2D in the transformer but used as 4D in the op kernel
            # this pattern only applies to TensorFlow models
if op.type == MaceOp.Reshape.name and \
len(op.input) == 2 and \
op.input[1] in self._consts and \
len(op.output_shape[0].dims) == 2 and \
filter_format == DataFormat.HWIO and \
op.input[0] in self._producer:
input_op = self._producer[op.input[0]]
input_shape = input_op.output_shape[0].dims
# check input op
if len(input_shape) == 4 and \
np.prod(input_shape[1:]) == op.output_shape[0].dims[1]:
is_fc = True
consumers = self._consumers[op.output[0]]
# check matmul op
for matmul_op in consumers:
if matmul_op.type != MaceOp.MatMul.name:
is_fc = False
else:
weight = self._consts[matmul_op.input[1]]
if len(weight.dims) != 2 or \
weight.dims[0] != op.output_shape[0].dims[1]:
is_fc = False
if is_fc:
print('convert reshape and matmul to fc')
self.safe_remove_node(op, input_op,
remove_input_tensor=True)
for matmul_op in consumers:
weight = self._consts[matmul_op.input[1]]
matmul_op.type = MaceOp.FullyConnected.name
weight_data = np.array(weight.float_data).reshape(
weight.dims)
weight.dims[:] = input_shape[1:] + \
[weight_data.shape[1]]
return True
# transform `fc1(2D) -> matmul` to `fc1(2D) -> fc1(2D)`
if op.type == MaceOp.MatMul.name and \
filter_format == DataFormat.HWIO and \
op.input[1] in self._consts:
producer = self._producer[op.input[0]]
weight = self._consts[op.input[1]]
if len(weight.dims) == 2 and self.is_after_fc(op) and \
len(producer.output_shape[0].dims) == 2 and \
weight.dims[0] == producer.output_shape[0].dims[1]:
six.print_('convert matmul to fc')
op.type = MaceOp.FullyConnected.name
weight_data = np.array(weight.float_data).reshape(
weight.dims)
weight.dims[:] = [1, 1] + list(weight_data.shape)
return True
return False
def update_float_op_data_type(self):
print("update op with float data type")
net = self._model
data_type = self._option.data_type
net.data_type = data_type
if self._option.quantize:
return
for op in net.op:
data_type_arg = ConverterUtil.get_arg(
op, MaceKeyword.mace_op_data_type_str)
if not data_type_arg:
data_type_arg = op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = data_type
elif data_type_arg.i != data_type \
and data_type_arg.i == mace_pb2.DT_FLOAT:
data_type_arg.i = data_type
return False
def sort_dfs(self, op, visited, sorted_nodes):
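        """Post-order DFS over producers: append `op` to `sorted_nodes` only
        after the producers of all of its inputs have been visited."""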
if op.name in visited:
return
visited.update([op.name])
if len(op.input) > 0:
for input_tensor in op.input:
producer_op = self._producer.get(input_tensor, None)
if producer_op is None:
pass
elif producer_op.name not in visited:
self.sort_dfs(producer_op, visited, sorted_nodes)
sorted_nodes.append(op)
def sort_by_execution(self):
print("Sort by execution")
net = self._model
visited = set()
sorted_nodes = []
output_nodes = list(self._option.check_nodes.keys())
if not self._quantize_activation_info:
output_nodes.extend(self._option.output_nodes)
for output_node in output_nodes:
mace_check(output_node in self._producer,
"output_tensor %s not existed in model" % output_node)
self.sort_dfs(self._producer[output_node], visited, sorted_nodes)
del net.op[:]
net.op.extend(sorted_nodes)
print("Final ops:")
index = 0
for op in net.op:
if op.type not in [MaceOp.Quantize.name, MaceOp.Dequantize.name]:
index_str = str(index)
index += 1
else:
index_str = ''
print("%s (%s, index:%s): %s" % (op.name, op.type, index_str, [
out_shape.dims for out_shape in op.output_shape]))
return False
def is_transposable_data_format_ops(self, op):
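        # An op listed in MaceTransposableDataFormatOps is only transposable
        # when its shapes allow it: Reshape must keep batch and channel on 4D
        # tensors, Squeeze must drop exactly the two spatial dims (axes [2, 3]
        # for NCHW sources, [1, 2] for NHWC).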
transposable = op.type in MaceTransposableDataFormatOps
if op.type == MaceOp.Reshape:
input_op = self._producer[op.input[0]]
input_dims = input_op.output_shape[0].dims
output_dims = op.output_shape[0].dims
if len(input_op.output_shape) != 1 or \
len(input_dims) != 4 or len(output_dims) != 4:
transposable = False
else:
in_b, in_h, in_w, in_c = self.sort_feature_map_shape(
input_dims, ConverterUtil.data_format(input_op))
ou_b, ou_h, ou_w, ou_c = self.sort_feature_map_shape(
output_dims, ConverterUtil.data_format(op))
transposable = (in_b == ou_b and in_c == ou_c)
elif op.type == MaceOp.Squeeze:
input_dims = self._producer[op.input[0]].output_shape[0].dims
output_dims = op.output_shape[0].dims
src_df = ConverterUtil.data_format(self._model)
arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
if len(input_dims) == 4 and len(output_dims) == 2 and \
((src_df == DataFormat.NCHW and arg.ints == [2, 3]) or
(src_df == DataFormat.NHWC and arg.ints == [1, 2])):
transposable = True
else:
transposable = False
if op.type in MaceTransposableDataFormatOps and not transposable:
print("%s(%s) is not a transposable op in this model."
% (op.name, op.type))
return transposable
def update_data_format(self):
print("update data format")
net = self._model
for op in net.op:
df_arg = ConverterUtil.get_arg(
op, MaceKeyword.mace_data_format_str)
if not df_arg:
df_arg = op.arg.add()
df_arg.name = MaceKeyword.mace_data_format_str
if op.type in MaceFixedDataFormatOps:
df_arg.i = DataFormat.AUTO.value
elif self.is_transposable_data_format_ops(op):
input_df = DataFormat.AUTO.value
for input_tensor in op.input:
if input_tensor in self._consts:
continue
mace_check(
input_tensor in self._producer,
"Input tensor %s not in producer" % input_tensor)
father_op = self._producer[input_tensor]
temp_input_df = ConverterUtil.get_arg(
father_op, MaceKeyword.mace_data_format_str)
if temp_input_df.i != DataFormat.AUTO.value:
input_df = temp_input_df.i
if input_df == DataFormat.AUTO.value:
df_arg.i = input_df
                    # add a flag to mark that the op may have a data format
has_data_format_arg = op.arg.add()
has_data_format_arg.name = \
MaceKeyword.mace_has_data_format_str
has_data_format_arg.i = 1
return False
def transpose_data_format(self):
print("Transpose arguments based on data format")
net = self._model
src_data_format = ConverterUtil.data_format(net)
for op in net.op:
has_data_format = ConverterUtil.data_format(op) == \
DataFormat.AUTO
# transpose args
if op.type == MaceOp.Pad.name:
for arg in op.arg:
if arg.name == MaceKeyword.mace_paddings_str:
mace_check(len(arg.ints) == 8,
"pad dim rank should be 8.")
if src_data_format == DataFormat.NCHW and \
has_data_format:
print("Transpose pad args: %s(%s)"
% (op.name, op.type))
self.transpose_shape(arg.ints,
[0, 1, 4, 5, 6, 7, 2, 3])
elif op.type == MaceOp.Concat.name or op.type == MaceOp.Split.name:
for arg in op.arg:
if arg.name == MaceKeyword.mace_axis_str:
if (src_data_format == DataFormat.NCHW
and has_data_format
and len(op.output_shape[0].dims) == 4):
print("Transpose concat/split args: %s(%s)"
% (op.name, op.type))
if arg.i == 1:
arg.i = 3
elif arg.i == 2:
arg.i = 1
elif arg.i == 3:
arg.i = 2
if op.input[0] in self._producer:
producer = self._producer[op.input[0]]
input_shape = producer.output_shape[0].dims
if (producer.type == MaceOp.FullyConnected.name
and len(input_shape) == 2):
axis_arg = ConverterUtil.get_arg(
op, MaceKeyword.mace_axis_str)
if axis_arg.i == 1:
axis_arg.i = 3
elif op.type == MaceOp.Squeeze.name:
for arg in op.arg:
if arg.name == MaceKeyword.mace_axis_str:
if (src_data_format == DataFormat.NCHW
and has_data_format
and len(self._producer[op.input[0]].output_shape[0].dims) == 4 # noqa
and len(op.output_shape[0].dims) == 2
and arg.ints == [2, 3]):
print("Transpose squeeze args: %s(%s)"
% (op.name, op.type))
arg.ints[:] = [1, 2]
elif op.type == MaceOp.Reduce.name:
for arg in op.arg:
if arg.name == MaceKeyword.mace_axis_str:
if src_data_format == DataFormat.NCHW and \
has_data_format:
print("Transpose reduce args: %s(%s)"
% (op.name, op.type))
reduce_axises = list(arg.ints)
new_axises = []
for i in range(len(reduce_axises)):
idx = reduce_axises[i]
if idx == 2 or idx == 3:
new_axises.append(idx - 1)
elif idx == 1:
new_axises.append(3)
else:
new_axises.append(idx)
new_axises.sort()
arg.ints[:] = []
arg.ints.extend(new_axises)
elif op.type == MaceOp.Crop.name:
offset_arg = ConverterUtil.get_arg(op,
MaceKeyword.mace_offset_str)
mace_check(offset_arg and
src_data_format == DataFormat.NCHW
and has_data_format
and len(op.output_shape[0].dims) == 4,
"MACE only support crop with NCHW format")
print("Transpose crop args: %s(%s)"
% (op.name, op.type))
self.transpose_shape(offset_arg.ints, [0, 2, 3, 1])
elif op.type == MaceOp.Reshape.name:
for arg in op.arg:
if arg.name == MaceKeyword.mace_dim_str and \
len(arg.ints) == 4 and \
src_data_format == DataFormat.NCHW and \
has_data_format:
self.transpose_shape(arg.ints, [0, 2, 3, 1])
# transpose op output shape
if src_data_format == DataFormat.NCHW and \
has_data_format:
print("Transpose output shapes: %s(%s)" % (op.name, op.type))
for output_shape in op.output_shape:
if len(output_shape.dims) == 4:
self.transpose_shape(output_shape.dims,
[0, 2, 3, 1])
return False
def quantize_nodes(self):
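        # Rename tensors to their quantized input/output aliases, switch
        # float ops to uint8, then insert a Quantize op after every model
        # input and a Dequantize op before every checked output.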
if not self._option.quantize:
return False
print("Add mace quantize and dequantize nodes")
for op in self._model.op:
for i in range(len(op.input)):
if op.input[i] in self.input_name_map:
op.input[i] = self.input_name_map[op.input[i]]
for i in range(len(op.output)):
if op.output[i] in self.output_name_map:
op.name = MaceKeyword.mace_output_node_name \
+ '_' + op.name
new_output_name = self.output_name_map[op.output[i]]
self._quantize_activation_info[new_output_name] = \
self._quantize_activation_info[op.output[i]]
if op.output[i] in self._consumers:
for consumer_op in self._consumers[op.output[i]]:
self.replace(consumer_op.input,
op.output[i],
new_output_name)
op.output[i] = new_output_name
data_type_arg = ConverterUtil.get_arg(
op, MaceKeyword.mace_op_data_type_str)
mace_check(data_type_arg, "Data type does not exist for %s(%s)"
% (op.name, op.type))
if data_type_arg.i == mace_pb2.DT_FLOAT:
data_type_arg.i = mace_pb2.DT_UINT8
elif data_type_arg.i == mace_pb2.DT_UINT8:
mace_check(op.type == MaceOp.Quantize.name
or op.type == MaceOp.Dequantize.name,
"Only Quantization ops support uint8, "
"but got %s(%s)" % (op.name, op.type))
else:
                mace_check(op.type == MaceOp.Quantize.name,
                           "Quantization only supports float ops, "
                           "but got %s(%s, %s)"
                           % (op.name, op.type,
                              mace_pb2.DataType.Name(data_type_arg.i)))
for i, input_node in enumerate(self._option.input_nodes.values()):
new_input_name = self.input_name_map[input_node.name]
op_def = self._model.op.add()
op_def.name = self.normalize_op_name(new_input_name)
op_def.type = MaceOp.Quantize.name
op_def.input.extend([input_node.name])
op_def.output.extend([new_input_name])
output_shape = op_def.output_shape.add()
output_shape.dims.extend(input_node.shape)
quantize_info = self._quantize_activation_info[new_input_name]
self.copy_quantize_info(op_def, quantize_info)
self._model.input_info[i].scale = quantize_info.scale
self._model.input_info[i].zero_point = quantize_info.zero_point
ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT8)
ConverterUtil.add_data_format_arg(op_def, input_node.data_format)
# use actual ranges for model input quantize
find_range_every_time_arg = op_def.arg.add()
find_range_every_time_arg.name = \
MaceKeyword.mace_find_range_every_time
find_range_every_time_arg.i = 1
output_nodes = self._option.check_nodes.values()
for i, output_node in enumerate(output_nodes):
op_def = self._model.op.add()
op_def.name = self.normalize_op_name(output_node.name)
op_def.type = MaceOp.Dequantize.name
op_def.input.extend([self.output_name_map[output_node.name]])
op_def.output.extend([output_node.name])
output_shape = op_def.output_shape.add()
producer_op = self._producer[output_node.name]
output_shape.dims.extend(producer_op.output_shape[0].dims)
op_def.output_type.extend([mace_pb2.DT_FLOAT])
quantize_info = producer_op.quantize_info[0]
self._model.output_info[i].scale = quantize_info.scale
self._model.output_info[i].zero_point = quantize_info.zero_point
ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT8)
ConverterUtil.add_data_format_arg(op_def, output_node.data_format)
quantize_flag_arg = self._model.arg.add()
quantize_flag_arg.name = MaceKeyword.mace_quantize_flag_arg_str
quantize_flag_arg.i = 1
return False
def quantize_tensor(self, tensor):
"""Assume biasadd has been already folded with convolution and fc"""
if tensor.data_type == mace_pb2.DT_FLOAT:
ops = self._consumers.get(tensor.name, None)
check_conv = False
check_deconv = False
if ops is not None and len(ops) == 1:
if len(ops[0].input) >= 3:
check_conv =\
ops[0].type in [MaceOp.Conv2D.name,
MaceOp.DepthwiseConv2d.name,
MaceOp.FullyConnected.name]\
and ops[0].input[2] == tensor.name
                # in tensorflow the deconv bias is the fourth input
if ops[0].type in [MaceOp.Deconv2D.name,
MaceOp.DepthwiseDeconv2d]:
from_caffe = ConverterUtil.get_arg(
ops[0],
MaceKeyword.mace_framework_type_str).i ==\
FrameworkType.CAFFE.value
if from_caffe and len(ops[0].input) >= 3:
check_deconv = ops[0].input[2] == tensor.name
else:
if len(ops[0].input) >= 4:
check_deconv = ops[0].input[3] == tensor.name
if check_conv or check_deconv:
if self._option.device == DeviceType.CPU.value \
or self._option.device == DeviceType.APU.value:
conv_op = ops[0]
scale_input = self._quantize_activation_info[
conv_op.input[0]].scale
if conv_op.input[1] not in self._quantized_tensor:
self.quantize_tensor(self._consts[conv_op.input[1]])
scale_filter = self._consts[conv_op.input[1]].scale
scale = scale_input * scale_filter
quantized_tensor = \
quantize_util.quantize_with_scale_and_zero(
tensor.float_data, scale, 0)
elif self._option.device == DeviceType.HEXAGON.value or \
self._option.device == DeviceType.HTA.value:
quantized_tensor = \
quantize_util.quantize_bias_for_hexagon(
tensor.float_data)
else:
mace_check(False, "wrong device.")
tensor.data_type = mace_pb2.DT_INT32
else:
non_zero = self._option.device == DeviceType.CPU.value
quantized_tensor = quantize_util.quantize(tensor.float_data,
self._option.device,
non_zero)
tensor.data_type = mace_pb2.DT_UINT8
del tensor.float_data[:]
tensor.int32_data.extend(quantized_tensor.data)
tensor.scale = quantized_tensor.scale
tensor.zero_point = quantized_tensor.zero
tensor.minval = quantized_tensor.minval
tensor.maxval = quantized_tensor.maxval
tensor.quantized = True
self._quantized_tensor.update([tensor.name])
return False
def quantize_weights(self):
print("Quantize weights")
net = self._model
for tensor in net.tensors:
self.quantize_tensor(tensor)
return False
def quantize_large_tensor(self, tensor):
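        # Large const inputs of Conv2D/FullyConnected are stored as uint8 so
        # oversized weights shrink; activation quantization info is not needed here.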
if tensor.data_type == mace_pb2.DT_FLOAT:
ops = self._consumers.get(tensor.name, None)
if ops is not None and len(ops) == 1:
if ops[0].type in [MaceOp.Conv2D.name,
MaceOp.FullyConnected.name]:
quantized_tensor = \
quantize_util.quantize(tensor.float_data,
self._option.device,
False)
tensor.data_type = mace_pb2.DT_UINT8
del tensor.float_data[:]
tensor.int32_data.extend(quantized_tensor.data)
tensor.scale = quantized_tensor.scale
tensor.zero_point = quantized_tensor.zero
tensor.minval = quantized_tensor.minval
tensor.maxval = quantized_tensor.maxval
tensor.quantized = True
self._quantized_tensor.update([tensor.name])
def quantize_large_weights(self):
print("Quantize large weights")
net = self._model
for tensor in net.tensors:
self.quantize_large_tensor(tensor)
return False
def add_quantize_info(self, op, minval, maxval):
scale, zero, minval, maxval = \
quantize_util.adjust_range(minval, maxval, self._option.device,
non_zero=False)
quantize_info = op.quantize_info.add()
quantize_info.minval = minval
quantize_info.maxval = maxval
quantize_info.scale = scale
quantize_info.zero_point = zero
return quantize_info
def copy_quantize_info(self, op, info):
quantize_info = op.quantize_info.add()
quantize_info.minval = info.minval
quantize_info.maxval = info.maxval
quantize_info.scale = info.scale
quantize_info.zero_point = info.zero_point
def transform_fake_quantize(self):
        # Quantize info from fixed-point fine-tuning
print("Transform fake quantize")
net = self._model
for op in net.op:
if op.type == 'FakeQuantWithMinMaxVars' or \
op.type == 'FakeQuantWithMinMaxArgs':
if self._option.quantize and op.input[0] not in self._consts:
producer_op = self._producer[op.input[0]]
minval = ConverterUtil.get_arg(op, 'min').f
maxval = ConverterUtil.get_arg(op, 'max').f
quantize_info = \
self.add_quantize_info(producer_op, minval, maxval)
self._quantize_activation_info[op.input[0]] = quantize_info
# for add -> fakequant pattern
self._quantize_activation_info[op.output[0]] = \
quantize_info
print(op.input[0], op.output[0])
op.type = MaceOp.Identity.name
return False
def rearrange_batch_to_space(self):
if not self._option.quantize:
return False
# Put b2s after biasadd and relu
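        # Moving BatchToSpaceND behind BiasAdd/Activation keeps the
        # conv -> bias -> activation chain contiguous, so it can later be
        # folded into the quantized convolution.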
for conv_op in self._model.op:
if conv_op.type in [MaceOp.Conv2D.name,
MaceOp.DepthwiseConv2d.name] \
and self.consumer_count(conv_op.output[0]) == 1:
b2s_op = self._consumers[conv_op.output[0]][0]
if b2s_op.type == MaceOp.BatchToSpaceND.name \
and self.consumer_count(b2s_op.output[0]) == 1:
biasadd_or_act_op = self._consumers[b2s_op.output[0]][0]
if biasadd_or_act_op.type == MaceOp.BiasAdd.name:
biasadd_op = biasadd_or_act_op
if self.consumer_count(biasadd_op.output[0]) == 1 \
and self._consumers[biasadd_op.output[0]][0].type == MaceOp.Activation.name: # noqa
act_op = self._consumers[biasadd_op.output[0]][0]
biasadd_op.input[0] = conv_op.output[0]
b2s_op.input[0] = act_op.output[0]
for op in self._consumers[act_op.output[0]]:
self.replace(op.input,
act_op.output[0],
b2s_op.output[0])
else:
biasadd_op.input[0] = conv_op.output[0]
b2s_op.input[0] = biasadd_op.output[0]
for op in self._consumers[biasadd_op.output[0]]:
self.replace(op.input,
biasadd_op.output[0],
b2s_op.output[0])
print("Rearrange batch to space: %s(%s)"
% (b2s_op.name, b2s_op.type))
return True
elif biasadd_or_act_op.type == MaceOp.Activation.name:
act_op = biasadd_or_act_op
act_op.input[0] = conv_op.output[0]
b2s_op.input[0] = act_op.output[0]
for op in self._consumers[act_op.output[0]]:
self.replace(op.input,
act_op.output[0],
b2s_op.output[0])
print("Rearrange batch to space: %s(%s)"
% (b2s_op.name, b2s_op.type))
return True
return False
def add_quantize_tensor_range(self):
# Quantize info from range statistics
range_file = self._option.quantize_range_file
if range_file:
print("Add quantize tensor range")
post_quantize_info = {}
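            # Each line of the range file is "<tensor_name>@@<min>,<max>",
            # e.g. "conv1/Relu:0@@-1.0,6.0" (the tensor name here is illustrative).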
with open(range_file) as f:
for line in f:
tensor_name, minmax = line.split("@@")[:2]
min_val, max_val = [float(i) for i in
minmax.strip().split(",")]
scale, zero, min_val, max_val = \
quantize_util.adjust_range(min_val, max_val,
self._option.device,
non_zero=False)
activation_info = mace_pb2.QuantizeActivationInfo()
activation_info.minval = min_val
activation_info.maxval = max_val
activation_info.scale = scale
activation_info.zero_point = zero
if tensor_name not in self._quantize_activation_info:
post_quantize_info[tensor_name] = activation_info
for op in self._model.op:
if op.name.find(MaceKeyword.mace_output_node_name) >= 0:
continue
for output in op.output:
# Prefer quantize info from quantization-aware training
if output not in self._quantize_activation_info:
mace_check(output in post_quantize_info,
"%s does not have quantize activation info"
% op)
op.quantize_info.extend([post_quantize_info[output]])
self._quantize_activation_info[output] = \
post_quantize_info[output]
if not self._option.quantize:
return False
print("Add default quantize info for input")
for i, input_node in enumerate(self._option.input_nodes.values()):
if input_node.name not in self._quantize_activation_info:
print("Input range %s: %s" % (input_node.name,
str(input_node.range)))
new_input_name = self.input_name_map[input_node.name]
scale, zero, minval, maxval = \
quantize_util.adjust_range(input_node.range[0],
input_node.range[1],
self._option.device,
non_zero=False)
quantize_info = \
mace_pb2.QuantizeActivationInfo()
quantize_info.minval = minval
quantize_info.maxval = maxval
quantize_info.scale = scale
quantize_info.zero_point = zero
self._quantize_activation_info[new_input_name] = quantize_info
input_op = self._producer[input_node.name]
input_op.quantize_info.extend([quantize_info])
print("Add default quantize info for ops like Pooling, Softmax")
for op in self._model.op:
if op.type in [MaceOp.ExpandDims.name,
MaceOp.Pad.name,
MaceOp.Pooling.name,
MaceOp.Reduce.name,
MaceOp.Reshape.name,
MaceOp.ResizeBilinear.name,
MaceOp.Squeeze.name,
MaceOp.StridedSlice.name,
MaceOp.BatchToSpaceND.name,
MaceOp.SpaceToBatchND.name,
MaceOp.SpaceToDepth.name,
MaceOp.DepthToSpace.name]:
del op.quantize_info[:]
producer_op = self._producer[op.input[0]]
if producer_op.output[0] in self._option.input_nodes:
new_input_name = self.input_name_map[producer_op.output[0]]
self.copy_quantize_info(
op, self._quantize_activation_info[new_input_name])
else:
self.copy_quantize_info(op,
producer_op.quantize_info[0])
self._quantize_activation_info[op.output[0]] = \
op.quantize_info[0]
elif (op.type == MaceOp.Concat.name
and (not op.quantize_info
or self._option.change_concat_ranges)):
if op.quantize_info:
maxval = op.quantize_info[0].maxval
minval = op.quantize_info[0].minval
del op.quantize_info[:]
else:
maxval = float("-inf")
minval = float("inf")
for i in range(len(op.input)):
minval = min(minval, self._producer[op.input[i]].quantize_info[0].minval) # noqa
maxval = max(maxval, self._producer[op.input[i]].quantize_info[0].maxval) # noqa
quantize_info = \
self.add_quantize_info(op, minval, maxval)
self._quantize_activation_info[op.output[0]] = quantize_info
if self._option.change_concat_ranges:
for i in range(len(op.input)):
producer_op = self._producer[op.input[i]]
del producer_op.quantize_info[:]
self.copy_quantize_info(producer_op, quantize_info)
self._quantize_activation_info[producer_op.output[0]] \
= producer_op.quantize_info[0]
elif op.type == MaceOp.Activation.name:
act_type = ConverterUtil.get_arg(
op, MaceKeyword.mace_activation_type_str).s.decode()
if act_type not in [ActivationType.TANH.name,
ActivationType.SIGMOID.name]:
continue
del op.quantize_info[:]
if act_type == ActivationType.TANH.name:
quantize_info = self.add_quantize_info(op, -1.0, 1.0)
else:
quantize_info = self.add_quantize_info(op, 0.0, 1.0)
self._quantize_activation_info[op.output[0]] = quantize_info
elif op.type == MaceOp.Softmax.name:
del op.quantize_info[:]
quantize_info = \
self.add_quantize_info(op, 0.0, 1.0)
self._quantize_activation_info[op.output[0]] = quantize_info
elif (op.type == MaceOp.Eltwise.name
and not op.quantize_info
and len(op.input) == 2
and op.input[0] not in self._consts
and op.input[1] not in self._consts):
producer_op0 = self._producer[op.input[0]]
producer_op1 = self._producer[op.input[1]]
if ConverterUtil.get_arg(
op, MaceKeyword.mace_element_type_str).i \
== EltwiseType.SUM.value:
minval = producer_op0.quantize_info[0].minval \
+ producer_op1.quantize_info[0].minval
maxval = producer_op0.quantize_info[0].maxval \
+ producer_op1.quantize_info[0].maxval
elif ConverterUtil.get_arg(
op, MaceKeyword.mace_element_type_str).i \
== EltwiseType.SUB.value:
minval = producer_op0.quantize_info[0].minval \
- producer_op1.quantize_info[0].maxval
maxval = producer_op0.quantize_info[0].maxval \
- producer_op1.quantize_info[0].minval
else:
print(op)
mace_check(False, "Quantized Elementwise only support:"
" SUM and SUB without ranges now.")
quantize_info = \
self.add_quantize_info(op, minval, maxval)
self._quantize_activation_info[op.output[0]] = quantize_info
return False
def check_quantize_info(self):
if not self._option.quantize:
return False
print("Check quantize info")
for op in self._model.op:
if (op.name.find(MaceKeyword.mace_input_node_name) == -1
and op.name.find(MaceKeyword.mace_output_node_name) == -1
and op.type != MaceOp.Quantize.name
and op.type != MaceOp.Dequantize.name): # noqa
mace_check(len(op.output) == len(op.quantize_info),
"missing quantize info: %s" % op)
for i in six.moves.range(len(op.quantize_info)):
print("Op output %s range: [%f, %f]" % (
op.output[i],
op.quantize_info[i].minval,
op.quantize_info[i].maxval))
def fp16_gather_weight(self):
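        # Store Gather (embedding lookup) weights as fp16 and append a Cast op
        # that converts the gathered values back to fp32.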
for op in self._model.op:
if op.type != MaceOp.Gather.name:
continue
if op.input[0] not in self._consts:
raise KeyError("Not in const tensor: " + str(op.input[0]))
const_tensor = self._consts[op.input[0]]
if const_tensor.data_type == mace_pb2.DT_FLOAT16:
                print(str(const_tensor.name) + " is already float16")
continue
print("FP16 Embedding Lookup Weights: %s" % const_tensor.name)
op_outputs = [x for x in op.output]
new_gather_name = op.name + '_fp16'
new_gather_output_name = new_gather_name + ":0"
dehalve_name = op.name
# fp16 weights
const_tensor.data_type = mace_pb2.DT_FLOAT16
# change gather
op.name = new_gather_name
op.output[:] = [new_gather_output_name]
# op.output.extend([new_gather_output_name])
data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str) # noqa
if data_type_arg is None:
data_type_arg = op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = mace_pb2.DT_FLOAT16
# add dehalve
dehalve_op = self._model.op.add()
dehalve_op.name = dehalve_name
dehalve_op.type = MaceOp.Cast.name
dehalve_op.input.extend([new_gather_output_name])
dehalve_op.output.extend(op_outputs)
dehalve_op.output_shape.extend(op.output_shape)
dehalve_op.output_type.extend([mace_pb2.DT_FLOAT])
data_type_arg = dehalve_op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = mace_pb2.DT_FLOAT16
def fp16_matmul_weight(self):
if self._option.device != DeviceType.CPU.value:
return
print('Convert matmul weights to fp16 for specific matmul: activation + weights') # noqa
for op in self._model.op:
if op.type != MaceOp.MatMul.name:
continue
if op.input[0] not in self._consts and op.input[1] not in self._consts: # noqa
continue
if op.input[0] in self._consts and op.input[1] in self._consts:
continue
            # Matmul fp16 Op only supports fp32[1,k] x fp16[w,k]T or fp16[w,k] x fp32[k,1] now! # noqa
transpose_a_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_a_str) # noqa
transpose_b_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str) # noqa
transpose_a = transpose_a_arg is not None and transpose_a_arg.i == 1 # noqa
transpose_b = transpose_b_arg is not None and transpose_b_arg.i == 1 # noqa
left_tensor = op.input[0]
right_tensor = op.input[1]
left_shape = self.get_tensor_shape(left_tensor)
right_shape = self.get_tensor_shape(right_tensor)
height = left_shape[-1] if transpose_a else left_shape[-2]
width = right_shape[-2] if transpose_b else right_shape[-1]
batch = reduce(lambda x, y: x * y, left_shape[: -2], 1)
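            # Only GEMV-like cases are converted: batch == 1 and the non-const
            # operand is a single row/column; larger GEMMs keep fp32 weights.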
if batch != 1:
continue
if left_tensor in self._consts:
if width != 1 or transpose_a:
continue
const_tensor = self._consts[left_tensor]
else:
if height != 1 or not transpose_b:
continue
const_tensor = self._consts[right_tensor]
print('Convert Matmul Weights to fp16: %s' % op.name)
const_tensor.data_type = mace_pb2.DT_FLOAT16
data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str) # noqa
if data_type_arg is None:
data_type_arg = op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = mace_pb2.DT_FLOAT16
op.output_type.extend([mace_pb2.DT_FLOAT])
def add_opencl_informations(self):
print("Add OpenCL informations")
net = self._model
arg = net.arg.add()
arg.name = MaceKeyword.mace_opencl_mem_type
arg.i = mace_pb2.GPU_IMAGE if self._option.cl_mem_type == "image"\
else mace_pb2.GPU_BUFFER
def transform_reshape_and_flatten(self):
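        # Caffe Reshape/Flatten carry their target shape in op args rather
        # than a second input; materialize a shape tensor so they match the
        # two-input Reshape form.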
net = self._model
for op in net.op:
if op.type != MaceOp.Reshape.name:
continue
dim_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str)
shape_tensor = None
if len(op.input) == 1:
print("Transform Caffe Reshape")
dims = []
axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
# transform caffe reshape op
if dim_arg:
dims = dim_arg.ints
shape_tensor = net.tensors.add()
shape_tensor.name = op.name + '_shape'
shape_tensor.dims.append(len(op.output_shape[0].dims))
shape_tensor.data_type = mace_pb2.DT_INT32
# transform caffe flatten op
elif axis_arg is not None:
axis = axis_arg.i
for i in range(0, axis):
dims.append(0)
dims.append(-1)
for i in range(axis + 1, len(op.output_shape[0].dims)):
dims.append(0)
shape_tensor = net.tensors.add()
shape_tensor.name = op.name + '_shape'
shape_tensor.dims.append(len(dims))
shape_tensor.data_type = mace_pb2.DT_INT32
else:
mace_check(False, "Only support reshape and flatten")
shape_tensor.int32_data.extend(dims)
op.input.append(shape_tensor.name)
def transform_shape_tensor_to_param(self):
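        # When an op's shape input is a const tensor, copy it into a 'dim'
        # arg so the target shape is also available as a static parameter.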
kOpTypeInputIdxMap = {
MaceOp.ResizeNearestNeighbor.name: 1,
MaceOp.Deconv2D.name: 2,
MaceOp.Reshape.name: 1,
}
net = self._model
for op in net.op:
if op.type not in kOpTypeInputIdxMap:
continue
shape_idx = kOpTypeInputIdxMap[op.type]
dim_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str)
if len(op.input) > shape_idx and dim_arg is None and \
op.input[shape_idx] in self._consts:
shape_tensor = self._consts[op.input[shape_idx]]
dim_arg = op.arg.add()
dim_arg.name = MaceKeyword.mace_dim_str
dim_arg.ints.extend(shape_tensor.int32_data)
def fold_fc_reshape(self):
net = self._model
for op in net.op:
            # whether to reshape fc output (default 4D)
if op.type == MaceOp.FullyConnected.name and\
op.output[0] in self._consumers:
consumers = self._consumers[op.output[0]]
op_output_shape = op.output_shape[0].dims[:]
for consumer in consumers:
if consumer.type == MaceOp.Reshape.name and \
consumer.input[1] in self._consts and \
self._consts[consumer.input[1]].int32_data[:] == \
[op_output_shape[0], 1, 1, op_output_shape[1]]:
# work for tensorflow
net.tensors.remove(self._consts[consumer.input[1]])
del consumer.input[1]
self.safe_remove_node(consumer, None)
return True
return False
def transform_channel_shuffle(self):
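        # Recognize the Reshape -> 5-D Transpose(perm=[0,1,2,4,3]) -> Reshape
        # pattern and collapse it into a single ChannelShuffle op.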
net = self._model
for op in net.op:
if op.type == MaceOp.Transpose.name and \
len(op.output_shape[0].dims) == 5:
perm = ConverterUtil.get_arg(op,
MaceKeyword.mace_dims_str).ints
if [0, 1, 2, 4, 3] == list(perm):
# Remove the following Reshape op
reshape_op = self._consumers.get(op.output[0], None)
if (reshape_op and
len(reshape_op) == 1 and
reshape_op[0].type == MaceOp.Reshape.name and
len(reshape_op[0].output_shape[0].dims) == 4):
print("Transform channel shuffle")
output_shape = reshape_op[0].output_shape[0].dims
self.safe_remove_node(reshape_op[0], op,
remove_input_tensor=True)
else:
return False
# Change Transpose op to ChannelShuffle
op.type = MaceOp.ChannelShuffle.name
del op.arg[:]
group_arg = op.arg.add()
group_arg.name = MaceKeyword.mace_group_str
group_arg.i = op.output_shape[0].dims[4]
op.output_shape[0].dims[:] = output_shape
# Remove previous Reshape op
producer_op = self._producer.get(op.input[0], None)
if producer_op:
if producer_op.type == MaceOp.Reshape.name:
self.safe_remove_node(producer_op, None)
elif producer_op.type == MaceOp.Stack.name:
print("Change channel shuffle stack to concat")
# Change previous Stack op to Concat if any
producer_op.type = MaceOp.Concat.name
producer_op.output_shape[0].dims[:] = output_shape
return True
def quantize_specific_ops_only(self):
"""
        This transform rule is only used internally; it is not exposed to
        users, to keep the interface simple.
"""
to_quantize_ops_output_type = {
MaceOp.MatMul.name: mace_pb2.DT_INT32,
MaceOp.Gather.name: mace_pb2.DT_UINT8,
}
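        # Only float ops with at least one const input are rewritten: const
        # inputs are quantized in place, non-const inputs get a Quantize op,
        # and a trailing Dequantize restores the original float output name.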
for op in self._model.op:
if (op.type not in to_quantize_ops_output_type
or len(op.output) > 1
or ConverterUtil.get_arg(op,
MaceKeyword.mace_op_data_type_str).i != mace_pb2.DT_FLOAT): # noqa
# only support single output
continue
quantized_inputs_names = []
should_quantize = False
has_const = False
for idx, input_tensor in enumerate(op.input):
if input_tensor in self._consts:
has_const = True
break
if not has_const:
continue
for idx, input_tensor in enumerate(op.input):
if self.get_tensor_data_type(input_tensor) \
== mace_pb2.DT_FLOAT:
should_quantize = True
break
if not should_quantize:
continue
else:
print("Quantize op %s (%s)" % (op.name, op.type))
non_zero = self._option.device == DeviceType.CPU.value \
and op.type == MaceOp.MatMul.name
for idx, input_tensor in enumerate(op.input):
quantized_inputs_names.append(input_tensor)
if self.get_tensor_data_type(input_tensor) \
!= mace_pb2.DT_FLOAT:
continue
if input_tensor in self._consts:
const_tensor = self._consts[input_tensor]
quantized_tensor = quantize_util.quantize(
const_tensor.float_data, self._option.device, non_zero)
del const_tensor.float_data[:]
const_tensor.int32_data.extend(quantized_tensor.data)
const_tensor.data_type = mace_pb2.DT_UINT8
const_tensor.scale = quantized_tensor.scale
const_tensor.zero_point = quantized_tensor.zero
const_tensor.minval = quantized_tensor.minval
const_tensor.maxval = quantized_tensor.maxval
const_tensor.quantized = True
else:
input_shape = self.get_tensor_shape(input_tensor)
quantize_op = self._model.op.add()
quantize_op.name = self.normalize_op_name(
input_tensor) + "_quant"
quantize_op.type = MaceOp.Quantize.name
quantize_op.input.extend([input_tensor])
quantize_output_name = quantize_op.name + '_0'
quantize_op.output.extend([quantize_output_name])
output_shape = quantize_op.output_shape.add()
output_shape.dims.extend(input_shape)
quantize_op.output_type.extend([mace_pb2.DT_UINT8])
data_type_arg = quantize_op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = mace_pb2.DT_UINT8
ConverterUtil.add_data_format_arg(
quantize_op,
self.get_tensor_data_format(input_tensor))
data_type_arg = quantize_op.arg.add()
data_type_arg.name = MaceKeyword.mace_non_zero
data_type_arg.i = 0
find_range_arg = quantize_op.arg.add()
find_range_arg.name = \
MaceKeyword.mace_find_range_every_time
find_range_arg.i = 1
quantized_inputs_names[-1] = quantize_output_name
del op.input[:]
op.input.extend(quantized_inputs_names)
original_output_name = op.output[0]
op.output[0] = original_output_name + "_quant"
op.output_type.extend([to_quantize_ops_output_type[op.type]])
data_type_arg = ConverterUtil.get_arg(op,
MaceKeyword.mace_op_data_type_str) # noqa
if data_type_arg is None:
data_type_arg = op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = mace_pb2.DT_UINT8
dequantize_op = self._model.op.add()
dequantize_op.name = op.name + "_dequant"
dequantize_op.type = MaceOp.Dequantize.name
dequantize_op.input.extend([op.output[0]])
dequantize_op.output.extend([original_output_name])
dequantize_op.output_shape.extend(op.output_shape)
dequantize_op.output_type.extend([mace_pb2.DT_FLOAT])
data_type_arg = dequantize_op.arg.add()
data_type_arg.name = MaceKeyword.mace_op_data_type_str
data_type_arg.i = to_quantize_ops_output_type[op.type]
ConverterUtil.add_data_format_arg(
dequantize_op,
self.get_tensor_data_format(original_output_name))
quantize_flag_arg = ConverterUtil.get_arg(self._model,
MaceKeyword.mace_quantize_flag_arg_str) # noqa
if quantize_flag_arg is None:
quantize_flag_arg = self._model.arg.add()
quantize_flag_arg.name = MaceKeyword.mace_quantize_flag_arg_str
quantize_flag_arg.i = 1
return True
return False
| 47.806438 | 128 | 0.51383 |
4a27466f957a497b4fccef8e529356b4647eeb1e | 14,638 | py | Python | eaitest.py | jrlevine/eaitesttools | b51d23db0f37235f32a1f88f7a5ff5b5e2c8cdf1 | ["BSD-2-Clause"] | 1 | 2020-12-13T20:25:15.000Z | 2020-12-13T20:25:15.000Z | eaitest.py | jrlevine/eaitesttools | b51d23db0f37235f32a1f88f7a5ff5b5e2c8cdf1 | ["BSD-2-Clause"] | null | null | null | eaitest.py | jrlevine/eaitesttools | b51d23db0f37235f32a1f88f7a5ff5b5e2c8cdf1 | ["BSD-2-Clause"] | null | null | null | #!/usr/local/bin/python3
#
# EAI feature tests
from bottle import request, route, get, post, run, hook, app, template, view, \
static_file, redirect, HTTPResponse, BaseRequest
from beaker.middleware import SessionMiddleware
from eaidb import EAIdb
import re, sys
import base64
debug = False
# allow large zones
BaseRequest.MEMFILE_MAX = 1024*1024
# to keep out random snoopers
apikeys = {
"xxxxx": 'bob',
"yyyyyyy": 'mary',
}
session_opts = {
'session.key': 'eaitest',
'session.type': 'file',
'session.data_dir': '/tmp/session',
'session.lock_dir': '/tmp/sessionlock',
'session.cookie_expires' : 86400,
'session.secret': "swordfish",
'session.auto': True
}
# my cookie directory, for debugging
if __name__=="__main__":
if len(sys.argv) >= 2 and sys.argv[1].startswith("debug"):
session_opts['session.data_dir'] ='/tmp/mysession'
session_opts['session.lock_dir'] ='/tmp/mysessionlock'
debug = True
print("Debugging on")
myapp = SessionMiddleware(app(), session_opts)
# print something in red
def inred(msg):
if msg:
return '<font color="red">{0}</font>'.format(msg)
return msg
@hook('before_request')
def setup_request():
request.session = request.environ['beaker.session']
if 'user' not in request.session and request.path not in ('/api', '/login') and not request.path.startswith('/static'):
return redirect("/login")
# print "path is",request.path
def boilerplate():
"""
boilerplate toolbar at the top of each page
"""
here = request.path
def bp(page, desc):
if here == page:
return "<li><a href=\"{}\" class=active>{}</a></li>\n".format(page,desc)
else:
return "<li><a href=\"{}\">{}</a></li>\n".format(page,desc)
bo = "<ul id=tabnav>\n"
bo += bp("/packages","Packages")
bo += bp("/tasks","Tasks")
bo += bp("/tests","Tests")
bo += bp("/summary","Summary")
bo += bp("/help","Help")
bo += bp("/logout", "Logout")
bo += "</ul>\n<p align=right>Logged in as " + request.session['user']
bo += "</p>"
return bo
@view('failpage')
def failpage(why):
""" return a failure page
"""
return dict(boilerplate=boilerplate(),
kvetch=why)
@view('statuspage')
def statuspage(why):
""" return a status page
"""
return dict(boilerplate=boilerplate(),
kvetch=why)
#################### Session management ###########################
@get('/')
@get('/login')
@view('login')
def login():
return dict(name="EAI Tests")
@post('/login')
@view('login')
def loginp():
db = EAIdb(request.forms.user,debug=debug)
cqr = db.userlogin(pw=request.forms.pw)
if cqr:
request.session['user'] = cqr['user']
request.session['ttid'] = cqr['ttid']
return redirect('/packages')
if 'user' in request.session:
del request.session['user']
return dict(name="EAI tests", kvetch='User or password not recognized')
@get('/logout')
def logout():
""" log out and return to login page """
del request.session['user']
return redirect('/login')
@get('/packages')
@view('packages')
def packages():
db = EAIdb(request.session['user'],debug=debug)
prods = db.getproducts()
if not prods:
return failpage("No packages")
return dict(name="Packages to be tested", boilerplate=boilerplate(), prods=prods)
@get('/package/<pid:int>')
@view('package')
def package(pid):
db = EAIdb(request.session['user'],debug=debug)
prod = db.getproduct(pid=pid)
return dict(name=f"Package info for {prod['name']}", prod=prod,
boilerplate=boilerplate())
############################## Tests #############################
testtype = ('MUA','MSA','MTA','MDA','MSP','Web')
def typeselect(name="ttype"):
sel = f'<select name="{name}">\n' + \
"".join((f"<option>{t}</option>\n" for t in testtype)) + \
"</select>\n"
return sel
taskstate = ('assigned','working','done')
def stateselect(name="tstate", defstate=None):
sel = f'<select name="{name}">\n' + \
"".join(("<option {1}>{0}</option>\n".format(t, "selected" if t==defstate else "")
for t in taskstate)) + \
"</select>\n"
return sel
@get('/tests')
@view('tests')
def tests0():
return(tests(testtype[0])) # default
@get('/tests/<ttype>')
@view('tests')
def tests(ttype):
db = EAIdb(request.session['user'],debug=debug)
tests = db.gettests(ttype)
return dict(name="Tests for "+ttype, ttype=ttype, tests=tests,
testtype=testtype, boilerplate=boilerplate())
@get('/test/<tid:int>')
@view('test')
def test(tid):
"""
show one test
"""
db = EAIdb(request.session['user'],debug=debug)
test = db.gettest(tid=tid)
if not test:
return failpage("No such test")
return dict(name="Test description for "+test['testid'], test=test, tid=tid, boilerplate=boilerplate())
################ Tasks ################
@get('/tasks/<sortkey>')
@view('tasks')
def taskssort(kvetch=None , sortkey=None):
if sortkey in ('user', 'product','testtype','state'):
return tasks(sortkey=sortkey)
return tasks(kvetch=f"Mystery sortkey {sortkey}")
@get('/tasks')
@view('tasks')
def tasks(kvetch=None , sortkey=None):
db = EAIdb(request.session['user'],debug=debug)
tasks = db.gettasks(stats=True)
if sortkey:
tasks.sort(key=lambda x: x[sortkey])
return dict(name="Assigned Tasks", boilerplate=boilerplate(),
tasks=tasks, kvetch=kvetch)
@get('/finish/<ttid:int>/<pid:int>/<ttype>/<state>')
def chgtask(ttid, pid, ttype, state):
"""
    update a task's state (e.g. mark it done)
"""
db = EAIdb(request.session['user'],debug=debug)
product = db.getproduct(pid=pid)
ldone = db.getresults(ttid, pid, ttype, done=True)
ndone = db.getresults(ttid, pid, ttype, done=False)
args = {'ttid': ttid,
'pid': pid,
'testtype': ttype,
'state': state
}
r = db.addtask(args, update=True)
if not r[0]:
if debug:
print("failed",r)
return tasks(kvetch=r[1]) # error message
return tasks()
@get('/newtask')
@view('newtask')
def newtask():
db = EAIdb(request.session['user'],debug=debug)
return dict(name="Add a new task",
typeselect=typeselect(),
testerselect=db.testerselect(),
productselect=db.productselect(),
stateselect=stateselect(),
boilerplate=boilerplate())
@post('/newtask')
def pnewtask():
db = EAIdb(request.session['user'],debug=debug)
args = {'ttid': request.forms.tester,
'pid': request.forms.product,
'testtype': request.forms.ttype,
'state': request.forms.tstate
}
r = db.addtask(args)
if not r[0]:
if debug:
print("failed",r)
return tasks(kvetch=r[1]) # error message
return tasks()
@get('/task/<ttid:int>/<pid:int>/<ttype>')
@view('task')
def task(ttid, pid, ttype):
"""
show pending and completed tests in a task
"""
db = EAIdb(request.session['user'],debug=debug)
product = db.getproduct(pid=pid)
ldone = db.getresults(ttid, pid, ttype, done=True)
ndone = db.getresults(ttid, pid, ttype, done=False)
return dict(boilerplate=boilerplate(), name="Tests in this Task", ttid=ttid, pid=pid, ttype=ttype,
ldone=ldone, ndone=ndone, product=product)
@get('/result/<ttid:int>/<pid:int>/<tid:int>')
@view('result')
def result(ttid, pid, tid, kvetch=None, comments=None):
"""
show or update a result for a specific test
"""
db = EAIdb(request.session['user'],debug=debug)
res = db.getoneresult(tid, pid, ttid)
hashval = db.dhash(res)
test = db.gettest(tid=tid)
product = db.getproduct(pid=pid)
if res and res['picture']:
picurl = pictourl(res['picture'])
else:
picurl = None
if comments:
res['comments'] = comments # keep value from failed update
return dict(boilerplate=boilerplate(), name="Test Result", ttid=ttid, pid=pid, tid=tid,
res=res, test=test, product=product, picurl=picurl, kvetch=kvetch, hashval=hashval)
def pictourl(pic):
"""
turn bytes into data URL
"""
if pic.startswith(b'\x89\x50\x4e\x47\r\n\x1a\n'): # PNG signature
return b"data:image/png;base64," + base64.b64encode(pic)
if pic.startswith(b'\xff\xd8'):
return b"data:image/jpeg;base64," + base64.b64encode(pic)
return b"data:,Unknown%20file%20format"
@post('/result/<ttid:int>/<pid:int>/<tid:int>')
@view('result')
def postresult(ttid, pid, tid):
"""
add or update a test result
"""
db = EAIdb(request.session['user'],debug=debug)
status = request.forms.s
comments = request.forms.c
picture = request.files.get('pic')
oldhv = request.forms.hv
if not status:
return result(ttid, pid, tid, kvetch="Status not set")
test = db.gettest(tid=tid)
if request.forms.rr:
return task(ttid, pid, test['testtype'])
if request.forms.nn:
return result(ttid, pid, tid+1)
if not status:
return result(ttid, pid, tid, kvetch="Status not set", comments=comments)
res = db.getoneresult(tid, pid, ttid)
hashval = db.dhash(res)
if hashval != oldhv:
return result(ttid, pid, tid, kvetch="Database changed", comments=comments)
if picture:
l = picture.content_length
pictext = picture.file.read(l)
else:
pictext = None
r, m = db.addresult(tid, ttid, pid, status, comments, pictext)
if debug:
print("did addresult",request.forms.u, request.forms.ur, request.forms.un)
# return to this page
if (not r) or request.forms.u:
return result(ttid, pid, tid, kvetch=m)
if request.forms.un: # next test
if debug:
print("result",ttid,pid,tid+1)
return result(ttid, pid, tid+1)
# return to test page
return task(ttid, pid, test['testtype'])
@get('/summary/<ttype>')
@view('summary')
def tsummary(ttype):
return summary(ttype=ttype)
@get('/summary')
@view('summary')
def summary(ttype='MUA', ttid=None):
"""
summary table of tests and products
"""
db = EAIdb(request.session['user'],debug=debug)
if not ttid:
ttid=request.session['ttid']
s = db.getsummary(ttid, testtype=ttype)
if not s:
return template('nosummary', boilerplate=boilerplate(), name="Test Summary",
ttype=ttype, ttid=ttid, testtype=testtype, testerselect=db.testerselect(addall=True))
return dict(boilerplate=boilerplate(), name="Test Summary", products=s[0],
tests=s[1], results=s[2], ttype=ttype, ttid=ttid, testtype=testtype,
testerselect=db.testerselect(addall=True))
@post('/summary')
@view('summary')
def postsummary():
"""
change who or what on summary page
"""
ttid = request.forms.tester
ttype = request.forms.ttype
return summary(ttype=ttype, ttid=ttid)
################################################################
# programmatic API
@post('/api')
def api():
"""
API for
json blob of
apikey: secret key
request: getresult, setresult, getresults, gettasks
getresults; product, testtype, optional Done. optional tester
getresult: product, testid or tid
setresult: product, testid or tid, status, optional comments
tasks: product, testtype
response blob of
request: whatever
answer: yes
"""
j = request.json
# print("request", j)
k = j.get('apikey','x')
if k not in apikeys or 'request' not in j:
raise HTTPResponse(status=403)
db = EAIdb(apikeys[k],debug=debug) # fake login as user for api key
req = j['request']
r = { 'request': req, "answer": "yes" }
# get tester ID
if 'ttid' in j:
ttid = j['ttid']
else:
user = db.getuser()
ttid = user['ttid']
# get product
if req == 'products':
res = db.getproducts()
r['result'] = res
return r
if 'product' not in j:
raise HTTPResponse(status=403)
product = db.getproduct(name=j['product'])
if not product:
raise HTTPResponse(status=403)
pid = product['pid']
if req == 'getresults':
if 'testtype' not in j:
raise HTTPResponse(status=403)
if debug:
print("getresults", ttid, pid, j['testtype'], j.get('done', False)) # check for arguments
res = db.getresults(ttid, pid, j['testtype'], j.get('done', False)) # check for arguments
r['result'] = res
return r
elif req in ('getresult', 'setresult'):
if 'tid' in j:
tid = j['tid']
elif 'testid' in j:
tt = db.gettest(testid=j['testid'])
if tt:
tid = tt['tid']
else:
raise HTTPResponse(status=403) # unknown test
else:
raise HTTPResponse(status=403) # need tid or testid
if req == 'getresult':
res = db.getoneresult(tid, pid)
r['result'] = res
else: # setresult
if 'status' not in j:
raise HTTPResponse(status=403) # need status
res = db.addresult(tid, ttid, pid, j['status'], comments=j.get('comments', None))
r['result'] = res
return r
elif req == 'tasks':
testtype = j.get('testtype') # default OK
res = db.gettasks(testtype=testtype, pid=pid, stats=True)
r['result'] = res
return r
else:
raise HTTPResponse(status=403) # unknown request
############################################################
# try to be helpful
@get('/help')
@view('help')
def help():
""" be helpful
"""
return dict(boilerplate=boilerplate(), name="Help")
################################################################
# for CSS and images
@route('/static/<filename:path>')
def send_static(filename):
return static_file(filename, root='./static')
@route('/favicon.ico')
def favicon():
return static_file('favicon.ico', root='./static')
@route('/robots.txt')
def robots():
return static_file('robots.txt', root='./static')
################# main stub for debugging
if __name__=="__main__":
import sys
if len(sys.argv) >= 2 and sys.argv[1] == "debug":
run(app=myapp, host='localhost', port=8802, debug=True, reloader=True)
else:
run(app=myapp, server="cgi", debug=True)
| 27.776091 | 123 | 0.587785 |
4a27475926312ccb7a625df32634d6512bd2e976 | 436 | py | Python | src/gpt_utils/location.py | nelsonlove/gpt-utils | 1ce52a160c26b647f7674bf518a322b54b6080da | [
"MIT"
] | null | null | null | src/gpt_utils/location.py | nelsonlove/gpt-utils | 1ce52a160c26b647f7674bf518a322b54b6080da | [
"MIT"
] | null | null | null | src/gpt_utils/location.py | nelsonlove/gpt-utils | 1ce52a160c26b647f7674bf518a322b54b6080da | [
"MIT"
] | null | null | null | from . import GPT
from .prompt import ConversionPrompt
@GPT.requires_key
def fix_location(in_text):
"""Accepts a string containing a location and formats it properly."""
prompt = ConversionPrompt(
'I', 'O',
("lenox ma", "Lenox, MA"),
("london", "London, U.K."),
("chicago", "Chicago, IL"),
("dallas, tx", "Dallas, TX"),
engine='babbage'
)
return prompt.convert(in_text)
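# Example (illustrative; actual output depends on the GPT engine):
#   fix_location("nyc")  ->  "New York, NY"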
| 25.647059 | 73 | 0.591743 |
4a2748593c7ffd2811b7e0a1c9adcc5fcf3ca492 | 3,100 | py | Python | doc/source/examples/usage_helper.py | mail2nsrajesh/oslo.log | 33d76a5d296bc0685844a896970fb4f59cce5143 | [
"Apache-2.0"
] | null | null | null | doc/source/examples/usage_helper.py | mail2nsrajesh/oslo.log | 33d76a5d296bc0685844a896970fb4f59cce5143 | [
"Apache-2.0"
] | null | null | null | doc/source/examples/usage_helper.py | mail2nsrajesh/oslo.log | 33d76a5d296bc0685844a896970fb4f59cce5143 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A usage example with helper debugging of minimum Oslo Logging
This example requires the following package to be installed.
$ pip install oslo.log
Additional Oslo packages installed include oslo.config, oslo.context,
oslo.i18n, oslo.serialization and oslo.utils.
More information about Oslo Logging can be found at:
http://docs.openstack.org/developer/oslo.log/usage.html
"""
# Use default Python logging to display running output
import logging as py_logging
from oslo_config import cfg
from oslo_log import log as logging
LOG = py_logging.getLogger(__name__)
CONF = cfg.CONF
DOMAIN = "demo"
def prepare():
"""Prepare Oslo Logging (2 or 3 steps)
Use of Oslo Logging involves the following:
* logging.register_options
* logging.set_defaults (optional)
* logging.setup
"""
LOG.debug("Prepare Oslo Logging")
LOG.info("Size of configuration options before %d", len(CONF))
# Required step to register common, logging and generic configuration
# variables
logging.register_options(CONF)
LOG.info("Size of configuration options after %d", len(CONF))
# Optional step to set new defaults if necessary for
# * logging_context_format_string
# * default_log_levels
#
# These variables default to respectively:
#
# import oslo_log
# oslo_log._options.DEFAULT_LOG_LEVELS
# oslo_log._options.log_opts[0].default
#
custom_log_level_defaults = logging.get_default_log_levels() + [
'dogpile=INFO',
'routes=INFO'
]
logging.set_defaults(default_log_levels=custom_log_level_defaults)
# NOTE: We cannot show the contents of the CONF object
# after register_options() because accessing this caches
# the default_log_levels subsequently modified with set_defaults()
LOG.info("List of Oslo Logging configuration options and current values")
LOG.info("=" * 80)
for c in CONF:
LOG.info("%s = %s" % (c, CONF[c]))
LOG.info("=" * 80)
# Required setup based on configuration and domain
logging.setup(CONF, DOMAIN)
if __name__ == '__main__':
py_logging.basicConfig(level=py_logging.DEBUG)
prepare()
# NOTE: These examples do not demonstration Oslo i18n messages
LOG.info("Welcome to Oslo Logging")
LOG.debug("A debugging message")
LOG.warning("A warning occurred")
LOG.error("An error occurred")
try:
raise Exception("This is exceptional")
except Exception:
LOG.exception("An Exception occurred")
| 29.807692 | 77 | 0.71871 |
4a2749b71895f0ebbc35caf4072935fd945d9167 | 22,404 | py | Python | ucsmsdk/mometa/vm/VmLifeCyclePolicy.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/vm/VmLifeCyclePolicy.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/vm/VmLifeCyclePolicy.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for VmLifeCyclePolicy ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class VmLifeCyclePolicyConsts:
FSM_PREV_CONFIG_BEGIN = "configBegin"
FSM_PREV_CONFIG_FAIL = "configFail"
FSM_PREV_CONFIG_LOCAL = "configLocal"
FSM_PREV_CONFIG_PEER = "configPeer"
FSM_PREV_CONFIG_SUCCESS = "configSuccess"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_CONFIG_BEGIN = "configBegin"
FSM_STATUS_CONFIG_FAIL = "configFail"
FSM_STATUS_CONFIG_LOCAL = "configLocal"
FSM_STATUS_CONFIG_PEER = "configPeer"
FSM_STATUS_CONFIG_SUCCESS = "configSuccess"
FSM_STATUS_NOP = "nop"
INT_ID_NONE = "none"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
VM_RETENTION_1_DAY = "1-day"
VM_RETENTION_1_HOUR = "1-hour"
VM_RETENTION_1_MIN = "1-min"
VNIC_RETENTION_1_DAY = "1-day"
VNIC_RETENTION_1_HOUR = "1-hour"
VNIC_RETENTION_1_MIN = "1-min"
class VmLifeCyclePolicy(ManagedObject):
"""This is VmLifeCyclePolicy class."""
consts = VmLifeCyclePolicyConsts()
naming_props = set([])
mo_meta = MoMeta("VmLifeCyclePolicy", "vmLifeCyclePolicy", "vm-lc-policy", VersionMeta.Version111j, "InputOutput", 0x3ff, [], ["admin", "pn-policy"], ['orgOrg'], ['eventInst', 'faultInst', 'vmLifeCyclePolicyFsm', 'vmLifeCyclePolicyFsmTask'], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, ["configBegin", "configFail", "configLocal", "configPeer", "configSuccess", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", 
"ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, ["configBegin", "configFail", "configLocal", "configPeer", "configSuccess", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["local", "pending-policy", "policy"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x40, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"vm_retention": MoPropertyMeta("vm_retention", "vmRetention", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["1-day", "1-hour", "1-min"], ["1-7200"]),
"vnic_retention": MoPropertyMeta("vnic_retention", "vnicRetention", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["1-day", "1-hour", "1-min"], ["1-7200"]),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"intId": "int_id",
"name": "name",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"vmRetention": "vm_retention",
"vnicRetention": "vnic_retention",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.descr = None
self.fsm_descr = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.int_id = None
self.name = None
self.policy_level = None
self.policy_owner = None
self.sacl = None
self.status = None
self.vm_retention = None
self.vnic_retention = None
ManagedObject.__init__(self, "VmLifeCyclePolicy", parent_mo_or_dn, **kwargs)
| 89.975904 | 3,774 | 0.759373 |
4a2749e705a341088ce6e0bc3866ad36335e55f2 | 4,388 | py | Python | network/predict_many.py | Neoleukin-Therapeutics/trRosetta | 6a68c33e1c9834c24bd7aa010999d0e9e5279d4d | [
"MIT"
] | 179 | 2019-11-07T06:53:34.000Z | 2022-03-17T13:55:20.000Z | network/predict_many.py | Neoleukin-Therapeutics/trRosetta | 6a68c33e1c9834c24bd7aa010999d0e9e5279d4d | [
"MIT"
] | 11 | 2020-03-29T22:05:08.000Z | 2021-05-12T17:03:26.000Z | network/predict_many.py | Neoleukin-Therapeutics/trRosetta | 6a68c33e1c9834c24bd7aa010999d0e9e5279d4d | [
"MIT"
] | 52 | 2019-10-21T23:42:18.000Z | 2022-03-17T04:27:50.000Z | import warnings, logging, os, sys
warnings.filterwarnings('ignore',category=FutureWarning)
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import json
import tensorflow as tf
from utils import *
from arguments import get_args_many
args = get_args_many()
MDIR = args.MDIR
n2d_layers = 61
n2d_filters = 64
window2d = 3
wmin = 0.8
ns = 21
# load network weights in RAM
w,b,beta_,gamma_ = load_weights(args.MDIR)
#
# network
#
config = tf.ConfigProto(
gpu_options = tf.GPUOptions(allow_growth=True)
)
activation = tf.nn.elu
conv1d = tf.layers.conv1d
conv2d = tf.layers.conv2d
with tf.Graph().as_default():
with tf.name_scope('input'):
ncol = tf.placeholder(dtype=tf.int32, shape=())
nrow = tf.placeholder(dtype=tf.int32, shape=())
msa = tf.placeholder(dtype=tf.uint8, shape=(None,None))
#
# collect features
#
msa1hot = tf.one_hot(msa, ns, dtype=tf.float32)
weights = reweight(msa1hot, wmin)
# 1D features
f1d_seq = msa1hot[0,:,:20]
f1d_pssm = msa2pssm(msa1hot, weights)
f1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)
f1d = tf.expand_dims(f1d, axis=0)
f1d = tf.reshape(f1d, [1,ncol,42])
# 2D features
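    # pairwise co-evolution couplings from a fast DCA approximation; fall back
    # to zeros when the alignment contains only a single sequence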
f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, weights), lambda: tf.zeros([ncol,ncol,442], tf.float32))
f2d_dca = tf.expand_dims(f2d_dca, axis=0)
f2d = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]),
tf.tile(f1d[:,None,:,:], [1,ncol,1,1]),
f2d_dca], axis=-1)
f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])
#
# 2D network
#
# store ensemble of networks in separate branches
layers2d = [[] for _ in range(len(w))]
preds = [[] for _ in range(4)]
Activation = tf.nn.elu
for i in range(len(w)):
layers2d[i].append(Conv2d(f2d,w[i][0],b[i][0]))
layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][0],gamma_[i][0]))
layers2d[i].append(Activation(layers2d[i][-1]))
# resnet
idx = 1
dilation = 1
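        # stack of dilated residual blocks; the dilation rate cycles 1,2,4,8,16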
for _ in range(n2d_layers):
layers2d[i].append(Conv2d(layers2d[i][-1],w[i][idx],b[i][idx],dilation))
layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][idx],gamma_[i][idx]))
layers2d[i].append(Activation(layers2d[i][-1]))
idx += 1
layers2d[i].append(Conv2d(layers2d[i][-1],w[i][idx],b[i][idx],dilation))
layers2d[i].append(InstanceNorm(layers2d[i][-1],beta_[i][idx],gamma_[i][idx]))
layers2d[i].append(Activation(layers2d[i][-1] + layers2d[i][-6]))
idx += 1
dilation *= 2
if dilation > 16:
dilation = 1
# probabilities for theta and phi
preds[0].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][123],b[i][123]))[0])
preds[1].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][124],b[i][124]))[0])
# symmetrize
layers2d[i].append(0.5*(layers2d[i][-1]+tf.transpose(layers2d[i][-1],perm=[0,2,1,3])))
# probabilities for dist and omega
preds[2].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][125],b[i][125]))[0])
preds[3].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][127],b[i][127]))[0])
#preds[4].append(tf.nn.softmax(Conv2d(layers2d[i][-1],w[i][126],b[i][126]))[0])
# average over all branches
prob_theta = tf.reduce_mean(tf.stack(preds[0]),axis=0)
prob_phi = tf.reduce_mean(tf.stack(preds[1]),axis=0)
prob_dist = tf.reduce_mean(tf.stack(preds[2]),axis=0)
prob_omega = tf.reduce_mean(tf.stack(preds[3]),axis=0)
with tf.Session(config=config) as sess:
        # loop over all A3M files in the input folder
for filename in os.listdir(args.ALNDIR):
if not filename.endswith(".a3m"):
continue
# parse & predict
a3m = parse_a3m(args.ALNDIR + '/' + filename)
print("processing:", filename)
pd, pt, pp, po = sess.run([prob_dist, prob_theta, prob_phi, prob_omega],
feed_dict = {msa : a3m, ncol : a3m.shape[1], nrow : a3m.shape[0] })
# save distograms & anglegrams
npz_file = args.NPZDIR + '/' + filename[:-3] + 'npz'
np.savez_compressed(npz_file, dist=pd, omega=po, theta=pt, phi=pp)
| 32.264706 | 112 | 0.591613 |
4a2749ebffed0ee537968d7cdb32a8bc1cb54427 | 2,450 | py | Python | domain_management/domainutil.py | IBM/api-samples | d69f7a7b74244012e835f023bceec2b0370be586 | [
"Apache-2.0"
] | 172 | 2015-02-18T21:18:31.000Z | 2022-03-13T17:51:11.000Z | domain_management/domainutil.py | IBM/api-samples | d69f7a7b74244012e835f023bceec2b0370be586 | [
"Apache-2.0"
] | 15 | 2015-03-06T15:30:33.000Z | 2021-03-05T09:20:34.000Z | domain_management/domainutil.py | IBM/api-samples | d69f7a7b74244012e835f023bceec2b0370be586 | [
"Apache-2.0"
] | 77 | 2015-03-06T13:14:06.000Z | 2022-02-14T15:35:58.000Z | """Domain utilities used by sample scripts for Domain API.
"""
import json
import os
import sys
import uuid
from importlib import import_module
sys.path.append(os.path.realpath('../modules'))
_RestApiClient = import_module('RestApiClient')
_SampleUtilities = import_module('SampleUtilities')
_client = _RestApiClient.RestApiClient(version='6.0')
# Pretty-prints response.
pp_response = _SampleUtilities.pretty_print_response
def api(endpoint, method, data=None, json=False):
"""Invokes RestApiClient call_api method and pretty-prints the request.
"""
if json:
headers = _client.get_headers().copy()
headers['Content-type'] = 'application/json'
else:
headers = None
return _client.call_api(endpoint, method, headers=headers, data=data,
print_request=True)
def from_json(response):
"""Converts RestApiClient response from JSON to string.
"""
return json.loads(response.read().decode('utf-8'))
def to_json(data):
"""Converts Python data to JSON.
"""
return json.dumps(data).encode('utf8')
def setup_domain():
"""Sets up a domain with event collector ID = -1000 and returns this new
domain.
The domain name is a randomly generated UUID. The event collector ID is
chosen to be not among existing IDs.
If the data for the event collector ID already exists, re-use the domain
with that event collector configured.
"""
body = {
"asset_scanner_ids": [],
"custom_properties": [],
"deleted": False,
"description": "",
"event_collector_ids": [-1000], # Assign non-existing ID
"flow_collector_ids": [],
"flow_source_ids": [],
"log_source_group_ids": [],
"log_source_ids": [],
"name": str(uuid.uuid4()), # Generate a random domain name
"qvm_scanner_ids": [],
"tenant_id": 0
}
response = api('config/domain_management/domains', 'POST',
data=to_json(body), json=True)
if response.code == 201:
return from_json(response)
elif response.code == 409:
# Finds the domain ID for conflicting resource.
resp = api('config/domain_management/domains', 'GET')
domains = from_json(resp)
for domain in domains:
if -1000 in domain['event_collector_ids']:
return domain
print('ERROR: Unrecognized conflict error', file=sys.stderr)
| 29.518072 | 76 | 0.64898 |
4a274a2db9c0c59ef0c85a11dc81fec3f77847b4 | 203 | py | Python | 2016/02/test_lavvy_lock.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | 2 | 2020-12-12T03:18:45.000Z | 2021-12-17T00:35:33.000Z | 2016/02/test_lavvy_lock.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | null | null | null | 2016/02/test_lavvy_lock.py | GeoffRiley/AdventOfCode | 27fe8670a1923cb3b0675784f5e855ad18c29c93 | [
"Unlicense"
] | null | null | null | from lavvy_lock import lavvy_decode
def test_lavvy_decode():
test_code = '''ULL
RRDDD
LURDL
UUUUD'''
assert lavvy_decode(test_code) == '1985'
assert lavvy_decode(test_code, True) == '5DB3'
| 18.454545 | 50 | 0.714286 |
4a274b652127e10dc02a8f78940f1ad1c2739737 | 1,870 | py | Python | flask_reveal/tests/test_base.py | grupydf/flask-reveal | 6a60c9db68db68d18eae9734ac809158157ed293 | [
"MIT"
] | 2 | 2015-04-23T14:16:08.000Z | 2015-04-23T14:19:26.000Z | flask_reveal/tests/test_base.py | grupydf/flask-reveal | 6a60c9db68db68d18eae9734ac809158157ed293 | [
"MIT"
] | null | null | null | flask_reveal/tests/test_base.py | grupydf/flask-reveal | 6a60c9db68db68d18eae9734ac809158157ed293 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from flask import current_app
from flask_reveal.app import FlaskReveal
from flask_reveal.config import REVEAL_CONFIG, REVEAL_META
from flask_reveal.blueprints.reveal import reveal_blueprint
class BaseAppTestCase(unittest.TestCase):
def setUp(self):
self.app = FlaskReveal('flask_reveal')
self.app.config['TESTING'] = True
fd, self.config = tempfile.mkstemp('.py')
os.close(fd)
def tearDown(self):
os.remove(self.config)
def test_start_invalid_config(self):
self.assertRaises(FileNotFoundError,
self.app.start,
'', '', 'invalid_file')
def test_current_app(self):
with self.app.app_context():
self.assertEqual(current_app.name, 'flask_reveal')
def test_blueprint_loading(self):
with self.app.app_context():
self.assertDictEqual(current_app.blueprints,
{'reveal': reveal_blueprint})
def test_default_config_loading(self):
with self.app.app_context():
self.assertDictEqual(current_app.config['REVEAL_META'],
REVEAL_META)
self.assertDictEqual(current_app.config['REVEAL_CONFIG'],
REVEAL_CONFIG)
def test_user_config_loading(self):
with open(self.config, 'w') as config:
config.write('TEST_VAR = "TEST"')
self.app.load_user_config('', '', self.config)
with self.app.app_context():
self.assertEqual(current_app.config['TEST_VAR'], 'TEST')
def test_user_config_loading_invalid_config_file(self):
self.assertRaises(FileNotFoundError,
self.app.load_user_config,
'', '', 'invalid_file')
| 30.655738 | 69 | 0.615508 |
4a274b8640c1fd38bf0922dc720e98ebe27c8e6f | 13,881 | py | Python | tensorflow_probability/python/distributions/onehot_categorical_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/onehot_categorical_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/onehot_categorical_test.py | nbro/probability | 07a6378155f0ed720b5aaccf5387e3f9a432bd10 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for OneHotCategorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
tfb = tfp.bijectors
tfd = tfp.distributions
def make_onehot_categorical(batch_shape, num_classes, dtype=tf.int32):
logits = tf.random.uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=tf.float32) - 50.
return tfd.OneHotCategorical(logits, dtype=dtype, validate_args=True)
@test_util.test_all_tf_execution_regimes
class OneHotCategoricalTest(test_util.TestCase):
def setUp(self):
super(OneHotCategoricalTest, self).setUp()
self._rng = np.random.RandomState(42)
def assertRaises(self, error_class, msg):
if tf.executing_eagerly():
return self.assertRaisesRegexp(error_class, msg)
return self.assertRaisesOpError(msg)
def testP(self):
p = [0.2, 0.8]
dist = tfd.OneHotCategorical(probs=p, validate_args=True)
self.assertAllClose(p, self.evaluate(dist.probs))
self.assertAllEqual([2], dist.logits.shape)
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = tfd.OneHotCategorical(logits=logits, validate_args=True)
self.assertAllEqual([2], dist.probs.shape)
self.assertAllEqual([2], dist.logits.shape)
self.assertAllClose(self.evaluate(dist.probs), p)
self.assertAllClose(self.evaluate(dist.logits), logits)
def testShapes(self):
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape,
tensorshape_util.as_list(dist.batch_shape))
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], tensorshape_util.as_list(dist.event_shape))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
# event_shape is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10, dist.event_shape)
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, tf.constant(
10, dtype=tf.int32))
self.assertAllEqual(
len(batch_shape), tensorshape_util.rank(dist.batch_shape))
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], tensorshape_util.as_list(dist.event_shape))
self.assertEqual(10, self.evaluate(dist.event_shape_tensor()))
def testDtype(self):
dist = make_onehot_categorical([], 5, dtype=tf.int32)
self.assertEqual(dist.dtype, tf.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_onehot_categorical([], 5, dtype=tf.int64)
self.assertEqual(dist.dtype, tf.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, tf.float32)
self.assertEqual(dist.logits.dtype, tf.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(dist.logits.dtype, dist.prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
self.assertEqual(dist.logits.dtype, dist.log_prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
def testUnknownShape(self):
logits = tf1.placeholder_with_default(
[[-1000.0, 1000.0], [1000.0, -1000.0]], shape=None)
dist = tfd.OneHotCategorical(logits, validate_args=True)
sample = dist.sample()
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = self.evaluate(sample)
self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)
def testUnknownAndInvalidShape(self):
logits = tf1.placeholder_with_default(19.84, shape=None)
with self.assertRaises(
ValueError, 'Argument `logits` must have rank at least 1.'):
dist = tfd.OneHotCategorical(logits, validate_args=True)
self.evaluate(dist.sample())
logits = tf1.placeholder_with_default([[], []], shape=None)
with self.assertRaises(
ValueError, 'Argument `logits` must have final dimension >= 1.'):
dist = tfd.OneHotCategorical(logits, validate_args=True)
self.evaluate(dist.sample())
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = tfd.OneHotCategorical(logits, validate_args=True)
self.assertAllClose(self.evaluate(dist.entropy()),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
atol=1e-5, rtol=1e-5)
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = tfd.OneHotCategorical(logits, validate_args=True)
self.assertAllClose(
self.evaluate(dist.entropy()),
[-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))],
atol=1e-5, rtol=1e-5)
def testPmf(self):
# check that probability of samples correspond to their class probabilities
logits = self._rng.random_sample(size=(8, 2, 10))
prob = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
dist = tfd.OneHotCategorical(logits=logits, validate_args=True)
np_sample = self.evaluate(dist.sample())
np_prob = self.evaluate(dist.prob(np_sample))
expected_prob = prob[np_sample.astype(np.bool)]
self.assertAllClose(expected_prob, np_prob.flatten())
def testEventSizeOfOne(self):
d = tfd.OneHotCategorical(
probs=tf1.placeholder_with_default([1.], shape=None),
validate_args=True)
self.assertAllEqual(np.ones((5, 3, 1), dtype=np.int32),
self.evaluate(d.sample([5, 3])))
self.assertAllClose([0., 0., 0., 0., 0.],
self.evaluate(d.log_prob(tf.ones((5, 1)))))
self.assertAllClose(0., self.evaluate(d.entropy()))
self.assertAllClose([0.], self.evaluate(d.variance()))
self.assertAllClose([[0.]], self.evaluate(d.covariance()))
self.assertAllClose([1.], self.evaluate(d.mode()))
self.assertAllClose([1.], self.evaluate(d.mean()))
def testSample(self):
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = tfd.OneHotCategorical(tf.math.log(probs) - 50., validate_args=True)
n = 100
samples = dist.sample(n, seed=test_util.test_seed())
self.assertEqual(samples.dtype, tf.int32)
sample_values = self.evaluate(samples)
self.assertAllEqual([n, 1, 2, 2], sample_values.shape)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
def testSampleWithSampleShape(self):
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = tfd.OneHotCategorical(tf.math.log(probs) - 50., validate_args=True)
samples = dist.sample((100, 100), seed=test_util.test_seed())
prob = dist.prob(samples)
prob_val = self.evaluate(prob)
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
[0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)
def testCategoricalCategoricalKL(self):
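    # Compares the analytic KL(p||q) against a closed-form NumPy computation
    # and against a Monte Carlo estimate from samples drawn from p.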
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
for categories in [2, 10]:
for batch_size in [1, 2]:
p_logits = self._rng.random_sample((batch_size, categories))
q_logits = self._rng.random_sample((batch_size, categories))
p = tfd.OneHotCategorical(logits=p_logits, validate_args=True)
q = tfd.OneHotCategorical(logits=q_logits, validate_args=True)
prob_p = np_softmax(p_logits)
prob_q = np_softmax(q_logits)
kl_expected = np.sum(
prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)
kl_actual = tfd.kl_divergence(p, q)
kl_same = tfd.kl_divergence(p, p)
x = p.sample(int(2e4), seed=test_util.test_seed())
x = tf.cast(x, dtype=tf.float32)
# Compute empirical KL(p||q).
kl_sample = tf.reduce_mean(
input_tensor=p.log_prob(x) - q.log_prob(x), axis=0)
[kl_sample_, kl_actual_,
kl_same_] = self.evaluate([kl_sample, kl_actual, kl_same])
self.assertEqual(kl_actual.shape, (batch_size,))
self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-4)
self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
def testSampleUnbiasedNonScalarBatch(self):
logits = self._rng.rand(4, 3, 2).astype(np.float32)
dist = tfd.OneHotCategorical(logits=logits, validate_args=True)
n = int(3e3)
x = dist.sample(n, seed=test_util.test_seed())
x = tf.cast(x, dtype=tf.float32)
sample_mean = tf.reduce_mean(input_tensor=x, axis=0)
x_centered = tf.transpose(a=x - sample_mean, perm=[1, 2, 3, 0])
sample_covariance = tf.matmul(x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = self.evaluate([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.shape)
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.shape)
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
logits = self._rng.rand(3).astype(np.float32)
dist = tfd.OneHotCategorical(logits=logits, validate_args=True)
n = int(1e4)
x = dist.sample(n, seed=test_util.test_seed())
x = tf.cast(x, dtype=tf.float32)
sample_mean = tf.reduce_mean(input_tensor=x, axis=0) # elementwise mean
x_centered = x - sample_mean
sample_covariance = tf.matmul(x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = self.evaluate([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([3], sample_mean.shape)
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)
self.assertAllEqual([3, 3], sample_covariance.shape)
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.1)
def testParamTensorFromLogits(self):
x = tf.constant([-1., 0.5, 1.])
d = tfd.OneHotCategorical(logits=x, validate_args=True)
self.assertAllClose(
*self.evaluate([x, d.logits_parameter()]),
atol=0, rtol=1e-4)
self.assertAllClose(
*self.evaluate([tf.math.softmax(x),
d.probs_parameter()]),
atol=0,
rtol=1e-4)
def testParamTensorFromProbs(self):
x = tf.constant([0.1, 0.5, 0.4])
d = tfd.OneHotCategorical(probs=x, validate_args=True)
self.assertAllClose(
*self.evaluate([tf.math.log(x), d.logits_parameter()]),
atol=0, rtol=1e-4)
self.assertAllClose(
*self.evaluate([x, d.probs_parameter()]),
atol=0, rtol=1e-4)
@test_util.test_all_tf_execution_regimes
class OneHotCategoricalFromVariableTest(test_util.TestCase):
def testGradientLogits(self):
x = tf.Variable([-1., 0., 1])
d = tfd.OneHotCategorical(logits=x, validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([[1, 0, 0], [0, 0, 1]])
g = tape.gradient(loss, d.trainable_variables)
self.assertLen(g, 1)
self.assertAllNotNone(g)
def testGradientProbs(self):
x = tf.Variable([0.1, 0.7, 0.2])
d = tfd.OneHotCategorical(probs=x, validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([[1, 0, 0], [0, 0, 1]])
g = tape.gradient(loss, d.trainable_variables)
self.assertLen(g, 1)
self.assertAllNotNone(g)
def testAssertionsProbs(self):
x = tf.Variable([0.1, 0.7, 0.0])
with self.assertRaisesOpError('Argument `probs` must sum to 1.'):
d = tfd.OneHotCategorical(probs=x, validate_args=True)
self.evaluate([v.initializer for v in d.variables])
self.evaluate(d.entropy())
def testAssertionsProbsAfterMutation(self):
x = tf.Variable([0.25, 0.25, 0.5])
d = tfd.OneHotCategorical(probs=x, validate_args=True)
with self.assertRaisesOpError('Condition x >= 0 did not hold element-wise'):
self.evaluate([v.initializer for v in d.variables])
with tf.control_dependencies([x.assign([-0.25, 0.75, 0.5])]):
self.evaluate(d.logits_parameter())
def testAssertionsLogits(self):
x = tfp.util.TransformedVariable(0., tfb.Identity(), shape=None)
with self.assertRaisesRegexp(
ValueError, 'Argument `logits` must have rank at least 1.'):
d = tfd.OneHotCategorical(logits=x, validate_args=True)
self.evaluate([v.initializer for v in d.variables])
self.evaluate(d.entropy())
if __name__ == '__main__':
tf.test.main()
| 40.469388 | 80 | 0.670413 |
4a274c32ed647884f878b0eb041d24272785eeb6 | 39,591 | py | Python | svg2mod/svg2mod.py | al3ph/svg2mod | a6fb251b99b048920a4d92a6558243bc5ee4af6c | [
"CC0-1.0"
] | null | null | null | svg2mod/svg2mod.py | al3ph/svg2mod | a6fb251b99b048920a4d92a6558243bc5ee4af6c | [
"CC0-1.0"
] | null | null | null | svg2mod/svg2mod.py | al3ph/svg2mod | a6fb251b99b048920a4d92a6558243bc5ee4af6c | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python
from __future__ import absolute_import
import argparse
import datetime
import os
from pprint import pformat, pprint
import re
import svg2mod.svg as svg
import sys
#----------------------------------------------------------------------------
DEFAULT_DPI = 96 # 96 as of Inkscape 0.92
def main():
args, parser = get_arguments()
pretty = args.format == 'pretty'
use_mm = args.units == 'mm'
if pretty:
if not use_mm:
print( "Error: decimil units only allowed with legacy output type" )
sys.exit( -1 )
#if args.include_reverse:
#print(
#"Warning: reverse footprint not supported or required for" +
#" pretty output format"
#)
# Import the SVG:
imported = Svg2ModImport(
args.input_file_name,
args.module_name,
args.module_value
)
# Pick an output file name if none was provided:
if args.output_file_name is None:
args.output_file_name = os.path.splitext(
os.path.basename( args.input_file_name )
)[ 0 ]
# Append the correct file name extension if needed:
if pretty:
extension = ".kicad_mod"
else:
extension = ".mod"
if args.output_file_name[ - len( extension ) : ] != extension:
args.output_file_name += extension
# Create an exporter:
if pretty:
exported = Svg2ModExportPretty(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
)
else:
# If the module file exists, try to read it:
exported = None
if os.path.isfile( args.output_file_name ):
try:
exported = Svg2ModExportLegacyUpdater(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
args.dpi,
include_reverse = not args.front_only,
)
except Exception as e:
raise e
#print( e.message )
#exported = None
# Write the module file:
if exported is None:
exported = Svg2ModExportLegacy(
imported,
args.output_file_name,
args.scale_factor,
args.precision,
use_mm = use_mm,
dpi = args.dpi,
include_reverse = not args.front_only,
)
# Export the footprint:
exported.write()
#----------------------------------------------------------------------------
class LineSegment( object ):
#------------------------------------------------------------------------
@staticmethod
def _on_segment( p, q, r ):
""" Given three colinear points p, q, and r, check if
point q lies on line segment pr. """
if (
q.x <= max( p.x, r.x ) and
q.x >= min( p.x, r.x ) and
q.y <= max( p.y, r.y ) and
q.y >= min( p.y, r.y )
):
return True
return False
#------------------------------------------------------------------------
@staticmethod
def _orientation( p, q, r ):
""" Find orientation of ordered triplet (p, q, r).
Returns following values
0 --> p, q and r are colinear
1 --> Clockwise
2 --> Counterclockwise
"""
val = (
( q.y - p.y ) * ( r.x - q.x ) -
( q.x - p.x ) * ( r.y - q.y )
)
if val == 0: return 0
if val > 0: return 1
return 2
#------------------------------------------------------------------------
def __init__( self, p = None, q = None ):
self.p = p
self.q = q
#------------------------------------------------------------------------
def connects( self, segment ):
if self.q.x == segment.p.x and self.q.y == segment.p.y: return True
if self.q.x == segment.q.x and self.q.y == segment.q.y: return True
if self.p.x == segment.p.x and self.p.y == segment.p.y: return True
if self.p.x == segment.q.x and self.p.y == segment.q.y: return True
return False
#------------------------------------------------------------------------
def intersects( self, segment ):
""" Return true if line segments 'p1q1' and 'p2q2' intersect.
Adapted from:
http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
"""
# Find the four orientations needed for general and special cases:
o1 = self._orientation( self.p, self.q, segment.p )
o2 = self._orientation( self.p, self.q, segment.q )
o3 = self._orientation( segment.p, segment.q, self.p )
o4 = self._orientation( segment.p, segment.q, self.q )
return (
# General case:
( o1 != o2 and o3 != o4 )
or
# p1, q1 and p2 are colinear and p2 lies on segment p1q1:
( o1 == 0 and self._on_segment( self.p, segment.p, self.q ) )
or
# p1, q1 and p2 are colinear and q2 lies on segment p1q1:
( o2 == 0 and self._on_segment( self.p, segment.q, self.q ) )
or
# p2, q2 and p1 are colinear and p1 lies on segment p2q2:
( o3 == 0 and self._on_segment( segment.p, self.p, segment.q ) )
or
# p2, q2 and q1 are colinear and q1 lies on segment p2q2:
( o4 == 0 and self._on_segment( segment.p, self.q, segment.q ) )
)
#------------------------------------------------------------------------
def q_next( self, q ):
self.p = self.q
self.q = q
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class PolygonSegment( object ):
#------------------------------------------------------------------------
def __init__( self, points ):
self.points = points
if len( points ) < 3:
print(
"Warning:"
" Path segment has only {} points (not a polygon?)".format(
len( points )
)
)
#------------------------------------------------------------------------
# KiCad will not "pick up the pen" when moving between a polygon outline
# and holes within it, so we search for a pair of points connecting the
# outline (self) to the hole such that the connecting segment will not
# cross the visible inner space within any hole.
def _find_insertion_point( self, hole, holes ):
#print( " Finding insertion point. {} holes".format( len( holes ) ) )
# Try the next point on the container:
for cp in range( len( self.points ) ):
container_point = self.points[ cp ]
#print( " Trying container point {}".format( cp ) )
# Try the next point on the hole:
for hp in range( len( hole.points ) - 1 ):
hole_point = hole.points[ hp ]
#print( " Trying hole point {}".format( cp ) )
bridge = LineSegment( container_point, hole_point )
# Check for intersection with each other hole:
for other_hole in holes:
#print( " Trying other hole. Check = {}".format( hole == other_hole ) )
# If the other hole intersects, don't bother checking
# remaining holes:
if other_hole.intersects(
bridge,
check_connects = (
other_hole == hole or other_hole == self
)
): break
#print( " Hole does not intersect." )
else:
print( " Found insertion point: {}, {}".format( cp, hp ) )
# No other holes intersected, so this insertion point
# is acceptable:
return ( cp, hole.points_starting_on_index( hp ) )
print(
"Could not insert segment without overlapping other segments"
)
#------------------------------------------------------------------------
# Return the list of ordered points starting on the given index, ensuring
# that the first and last points are the same.
def points_starting_on_index( self, index ):
points = self.points
if index > 0:
# Strip off end point, which is a duplicate of the start point:
points = points[ : -1 ]
points = points[ index : ] + points[ : index ]
points.append(
svg.Point( points[ 0 ].x, points[ 0 ].y )
)
return points
#------------------------------------------------------------------------
# Return a list of points with the given polygon segments (paths) inlined.
def inline( self, segments ):
if len( segments ) < 1:
return self.points
print( " Inlining {} segments...".format( len( segments ) ) )
all_segments = segments[ : ] + [ self ]
insertions = []
# Find the insertion point for each hole:
for hole in segments:
insertion = self._find_insertion_point(
hole, all_segments
)
if insertion is not None:
insertions.append( insertion )
insertions.sort( key = lambda i: i[ 0 ] )
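        # Walk the outline and splice each hole's point list in at its
        # insertion index, so outline and holes form one continuous path: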
inlined = [ self.points[ 0 ] ]
ip = 1
points = self.points
for insertion in insertions:
while ip <= insertion[ 0 ]:
inlined.append( points[ ip ] )
ip += 1
if (
inlined[ -1 ].x == insertion[ 1 ][ 0 ].x and
inlined[ -1 ].y == insertion[ 1 ][ 0 ].y
):
inlined += insertion[ 1 ][ 1 : -1 ]
else:
inlined += insertion[ 1 ]
inlined.append( svg.Point(
points[ ip - 1 ].x,
points[ ip - 1 ].y,
) )
while ip < len( points ):
inlined.append( points[ ip ] )
ip += 1
return inlined
#------------------------------------------------------------------------
def intersects( self, line_segment, check_connects ):
hole_segment = LineSegment()
# Check each segment of other hole for intersection:
for point in self.points:
hole_segment.q_next( point )
if hole_segment.p is not None:
if (
check_connects and
line_segment.connects( hole_segment )
): continue
if line_segment.intersects( hole_segment ):
#print( "Intersection detected." )
return True
return False
#------------------------------------------------------------------------
# Apply all transformations and rounding, then remove duplicate
# consecutive points along the path.
def process( self, transformer, flip, fill ):
points = []
for point in self.points:
point = transformer.transform_point( point, flip )
if (
len( points ) < 1 or
point.x != points[ -1 ].x or
point.y != points[ -1 ].y
):
points.append( point )
if (
points[ 0 ].x != points[ -1 ].x or
points[ 0 ].y != points[ -1 ].y
):
#print( "Warning: Closing polygon. start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
if fill:
points.append( svg.Point(
points[ 0 ].x,
points[ 0 ].y,
) )
#else:
#print( "Polygon closed: start=({}, {}) end=({}, {})".format(
#points[ 0 ].x, points[ 0 ].y,
#points[ -1 ].x, points[ -1 ].y,
#) )
self.points = points
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModImport( object ):
#------------------------------------------------------------------------
def __init__( self, file_name, module_name, module_value ):
self.file_name = file_name
self.module_name = module_name
self.module_value = module_value
print( "Parsing SVG..." )
self.svg = svg.parse( file_name )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExport( object ):
#------------------------------------------------------------------------
@staticmethod
def _convert_decimil_to_mm( decimil ):
return float( decimil ) * 0.00254
#------------------------------------------------------------------------
@staticmethod
def _convert_mm_to_decimil( mm ):
return int( round( mm * 393.700787 ) )
#------------------------------------------------------------------------
def _get_fill_stroke( self, item ):
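        # Parse the item's inline SVG style for fill/stroke flags and convert
        # any pixel stroke-width into millimeters using the document DPI.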
fill = True
stroke = True
stroke_width = 0.0
if item.style is not None and item.style != "":
for property in item.style.split( ";" ):
                nv = property.split( ":" )
name = nv[ 0 ].strip()
value = nv[ 1 ].strip()
if name == "fill" and value == "none":
fill = False
elif name == "stroke" and value == "none":
stroke = False
elif name == "stroke-width":
if value.endswith("px"):
value = value.replace( "px", "" )
stroke_width = float( value ) * 25.4 / float(self.dpi)
else:
stroke_width = float( value )
if not stroke:
stroke_width = 0.0
elif stroke_width is None:
# Give a default stroke width?
stroke_width = self._convert_decimil_to_mm( 1 )
return fill, stroke, stroke_width
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
):
if use_mm:
# 25.4 mm/in;
scale_factor *= 25.4 / float(dpi)
use_mm = True
else:
# PCBNew uses "decimil" (10K DPI);
scale_factor *= 10000.0 / float(dpi)
self.imported = svg2mod_import
self.file_name = file_name
self.scale_factor = scale_factor
self.precision = precision
self.use_mm = use_mm
self.dpi = dpi
#------------------------------------------------------------------------
def _calculate_translation( self ):
min_point, max_point = self.imported.svg.bbox()
# Center the drawing:
adjust_x = min_point.x + ( max_point.x - min_point.x ) / 2.0
adjust_y = min_point.y + ( max_point.y - min_point.y ) / 2.0
self.translation = svg.Point(
0.0 - adjust_x,
0.0 - adjust_y,
)
#------------------------------------------------------------------------
# Find and keep only the layers of interest.
def _prune( self, items = None ):
if items is None:
self.layers = {}
for name in self.layer_map.iterkeys():
self.layers[ name ] = None
items = self.imported.svg.items
self.imported.svg.items = []
for item in items:
if not isinstance( item, svg.Group ):
continue
for name in self.layers.iterkeys():
#if re.search( name, item.name, re.I ):
if name == item.name:
print( "Found SVG layer: {}".format( item.name ) )
self.imported.svg.items.append( item )
self.layers[ name ] = item
break
else:
self._prune( item.items )
#------------------------------------------------------------------------
def _write_items( self, items, layer, flip = False ):
for item in items:
if isinstance( item, svg.Group ):
self._write_items( item.items, layer, flip )
continue
elif isinstance( item, svg.Path ):
segments = [
PolygonSegment( segment )
for segment in item.segments(
precision = self.precision
)
]
fill, stroke, stroke_width = self._get_fill_stroke( item )
for segment in segments:
segment.process( self, flip, fill )
if len( segments ) > 1:
points = segments[ 0 ].inline( segments[ 1 : ] )
elif len( segments ) > 0:
points = segments[ 0 ].points
if not self.use_mm:
stroke_width = self._convert_mm_to_decimil(
stroke_width
)
print( " Writing polygon with {} points".format(
len( points ) )
)
self._write_polygon(
points, layer, fill, stroke, stroke_width
)
else:
print( "Unsupported SVG element: {}".format(
item.__class__.__name__
) )
#------------------------------------------------------------------------
def _write_module( self, front ):
module_name = self._get_module_name( front )
min_point, max_point = self.imported.svg.bbox()
min_point = self.transform_point( min_point, flip = False )
max_point = self.transform_point( max_point, flip = False )
label_offset = 1200
label_size = 600
label_pen = 120
if self.use_mm:
label_size = self._convert_decimil_to_mm( label_size )
label_pen = self._convert_decimil_to_mm( label_pen )
reference_y = min_point.y - self._convert_decimil_to_mm( label_offset )
value_y = max_point.y + self._convert_decimil_to_mm( label_offset )
else:
reference_y = min_point.y - label_offset
value_y = max_point.y + label_offset
self._write_module_header(
label_size, label_pen,
reference_y, value_y,
front,
)
for name, group in self.layers.iteritems():
if group is None: continue
layer = self._get_layer_name( name, front )
#print( " Writing layer: {}".format( name ) )
self._write_items( group.items, layer, not front )
self._write_module_footer( front )
#------------------------------------------------------------------------
def _write_polygon_filled( self, points, layer, stroke_width = 0.0 ):
self._write_polygon_header( points, layer )
for point in points:
self._write_polygon_point( point )
self._write_polygon_footer( layer, stroke_width )
#------------------------------------------------------------------------
def _write_polygon_outline( self, points, layer, stroke_width ):
prior_point = None
for point in points:
if prior_point is not None:
self._write_polygon_segment(
prior_point, point, layer, stroke_width
)
prior_point = point
#------------------------------------------------------------------------
def transform_point( self, point, flip = False ):
transformed_point = svg.Point(
( point.x + self.translation.x ) * self.scale_factor,
( point.y + self.translation.y ) * self.scale_factor,
)
if flip:
transformed_point.x *= -1
if self.use_mm:
transformed_point.x = round( transformed_point.x, 12 )
transformed_point.y = round( transformed_point.y, 12 )
else:
transformed_point.x = int( round( transformed_point.x ) )
transformed_point.y = int( round( transformed_point.y ) )
return transformed_point
#------------------------------------------------------------------------
def write( self ):
self._prune()
# Must come after pruning:
        self._calculate_translation()
print( "Writing module file: {}".format( self.file_name ) )
self.output_file = open( self.file_name, 'w' )
self._write_library_intro()
self._write_modules()
self.output_file.close()
self.output_file = None
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacy( Svg2ModExport ):
layer_map = {
#'inkscape-name' : [ kicad-front, kicad-back ],
'Cu' : [ 15, 0 ],
'Adhes' : [ 17, 16 ],
'Paste' : [ 19, 18 ],
'SilkS' : [ 21, 20 ],
'Mask' : [ 23, 22 ],
'Dwgs.User' : [ 24, 24 ],
'Cmts.User' : [ 25, 25 ],
'Eco1.User' : [ 26, 26 ],
'Eco2.User' : [ 27, 27 ],
'Edge.Cuts' : [ 28, 28 ],
}
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
use_mm = True,
dpi = DEFAULT_DPI,
include_reverse = True,
):
super( Svg2ModExportLegacy, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
)
self.include_reverse = include_reverse
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
layer_info = self.layer_map[ name ]
layer = layer_info[ 0 ]
if not front and layer_info[ 1 ] is not None:
layer = layer_info[ 1 ]
return layer
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
if self.include_reverse and not front:
return self.imported.module_name + "-rev"
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
modules_list = self._get_module_name( front = True )
if self.include_reverse:
modules_list += (
"\n" +
self._get_module_name( front = False )
)
units = ""
if self.use_mm:
units = "\nUnits mm"
self.output_file.write( """PCBNEW-LibModule-V1 {0}{1}
$INDEX
{2}
$EndINDEX
#
# {3}
#
""".format(
datetime.datetime.now().strftime( "%a %d %b %Y %I:%M:%S %p %Z" ),
units,
modules_list,
self.imported.file_name,
)
)
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self.output_file.write( """$MODULE {0}
Po 0 0 0 {6} 00000000 00000000 ~~
Li {0}
T0 0 {1} {2} {2} 0 {3} N I 21 "{0}"
T1 0 {5} {2} {2} 0 {3} N I 21 "{4}"
""".format(
self._get_module_name( front ),
reference_y,
label_size,
label_pen,
self.imported.module_value,
value_y,
15, # Seems necessary
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write(
"$EndMODULE {0}\n".format( self._get_module_name( front ) )
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
if self.include_reverse:
self._write_module( front = False )
self.output_file.write( "$EndLIBRARY" )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer
)
if stroke:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
pass
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
pen = 1
if self.use_mm:
pen = self._convert_decimil_to_mm( pen )
self.output_file.write( "DP 0 0 0 0 {} {} {}\n".format(
len( points ),
pen,
layer
) )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
"Dl {} {}\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write( "DS {} {} {} {} {} {}\n".format(
p.x, p.y,
q.x, q.y,
stroke_width,
layer
) )
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportLegacyUpdater( Svg2ModExportLegacy ):
#------------------------------------------------------------------------
def __init__(
self,
svg2mod_import,
file_name,
scale_factor = 1.0,
precision = 20.0,
dpi = DEFAULT_DPI,
include_reverse = True,
):
self.file_name = file_name
use_mm = self._parse_output_file()
super( Svg2ModExportLegacyUpdater, self ).__init__(
svg2mod_import,
file_name,
scale_factor,
precision,
use_mm,
dpi,
include_reverse,
)
#------------------------------------------------------------------------
def _parse_output_file( self ):
print( "Parsing module file: {}".format( self.file_name ) )
module_file = open( self.file_name, 'r' )
lines = module_file.readlines()
module_file.close()
self.loaded_modules = {}
self.post_index = []
self.pre_index = []
use_mm = False
index = 0
# Find the start of the index:
while index < len( lines ):
line = lines[ index ]
index += 1
self.pre_index.append( line )
if line[ : 6 ] == "$INDEX":
break
m = re.match( "Units[\s]+mm[\s]*", line )
if m is not None:
print( " Use mm detected" )
use_mm = True
# Read the index:
while index < len( lines ):
line = lines[ index ]
if line[ : 9 ] == "$EndINDEX":
break
index += 1
self.loaded_modules[ line.strip() ] = []
# Read up until the first module:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
break
index += 1
self.post_index.append( line )
# Read modules:
while index < len( lines ):
line = lines[ index ]
if line[ : 7 ] == "$MODULE":
module_name, module_lines, index = self._read_module( lines, index )
if module_name is not None:
self.loaded_modules[ module_name ] = module_lines
elif line[ : 11 ] == "$EndLIBRARY":
break
else:
raise Exception(
"Expected $EndLIBRARY: [{}]".format( line )
)
#print( "Pre-index:" )
#pprint( self.pre_index )
#print( "Post-index:" )
#pprint( self.post_index )
#print( "Loaded modules:" )
#pprint( self.loaded_modules )
return use_mm
#------------------------------------------------------------------------
def _read_module( self, lines, index ):
# Read module name:
m = re.match( r'\$MODULE[\s]+([^\s]+)[\s]*', lines[ index ] )
module_name = m.group( 1 )
print( " Reading module {}".format( module_name ) )
index += 1
module_lines = []
while index < len( lines ):
line = lines[ index ]
index += 1
m = re.match(
r'\$EndMODULE[\s]+' + module_name + r'[\s]*', line
)
if m is not None:
return module_name, module_lines, index
module_lines.append( line )
raise Exception(
"Could not find end of module '{}'".format( module_name )
)
#------------------------------------------------------------------------
def _write_library_intro( self ):
# Write pre-index:
self.output_file.writelines( self.pre_index )
self.loaded_modules[ self._get_module_name( front = True ) ] = None
if self.include_reverse:
self.loaded_modules[
self._get_module_name( front = False )
] = None
# Write index:
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
self.output_file.write( module_name + "\n" )
# Write post-index:
self.output_file.writelines( self.post_index )
#------------------------------------------------------------------------
def _write_preserved_modules( self, up_to = None ):
if up_to is not None:
up_to = up_to.lower()
for module_name in sorted(
self.loaded_modules.iterkeys(),
key = str.lower
):
if up_to is not None and module_name.lower() >= up_to:
continue
module_lines = self.loaded_modules[ module_name ]
if module_lines is not None:
self.output_file.write(
"$MODULE {}\n".format( module_name )
)
self.output_file.writelines( module_lines )
self.output_file.write(
"$EndMODULE {}\n".format( module_name )
)
self.loaded_modules[ module_name ] = None
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
super( Svg2ModExportLegacyUpdater, self )._write_module_footer(
front,
)
# Write remaining modules:
if not front:
self._write_preserved_modules()
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
self._write_preserved_modules(
up_to = self._get_module_name( front )
)
super( Svg2ModExportLegacyUpdater, self )._write_module_header(
label_size,
label_pen,
reference_y,
value_y,
front,
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
class Svg2ModExportPretty( Svg2ModExport ):
layer_map = {
#'inkscape-name' : kicad-name,
'Cu' : "{}.Cu",
'Adhes' : "{}.Adhes",
'Paste' : "{}.Paste",
'SilkS' : "{}.SilkS",
'Mask' : "{}.Mask",
'CrtYd' : "{}.CrtYd",
'Fab' : "{}.Fab",
'Edge.Cuts' : "Edge.Cuts"
}
#------------------------------------------------------------------------
def _get_layer_name( self, name, front ):
if front:
return self.layer_map[ name ].format("F")
else:
return self.layer_map[ name ].format("B")
#------------------------------------------------------------------------
def _get_module_name( self, front = None ):
return self.imported.module_name
#------------------------------------------------------------------------
def _write_library_intro( self ):
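        # (tedit ...) records the module's last-edit time as a hex UNIX timestamp.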
self.output_file.write( """(module {0} (layer F.Cu) (tedit {1:8X})
(attr smd)
(descr "{2}")
(tags {3})
""".format(
self.imported.module_name, #0
int( round( os.path.getctime( #1
self.imported.file_name
) ) ),
"Imported from {}".format( self.imported.file_name ), #2
"svg2mod", #3
)
)
#------------------------------------------------------------------------
def _write_module_footer( self, front ):
self.output_file.write( "\n)" )
#------------------------------------------------------------------------
def _write_module_header(
self,
label_size,
label_pen,
reference_y,
value_y,
front,
):
if front:
side = "F"
else:
side = "B"
self.output_file.write(
""" (fp_text reference {0} (at 0 {1}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)
(fp_text value {5} (at 0 {6}) (layer {2}.SilkS) hide
(effects (font (size {3} {3}) (thickness {4})))
)""".format(
self._get_module_name(), #0
reference_y, #1
side, #2
label_size, #3
label_pen, #4
self.imported.module_value, #5
value_y, #6
)
)
#------------------------------------------------------------------------
def _write_modules( self ):
self._write_module( front = True )
#------------------------------------------------------------------------
def _write_polygon( self, points, layer, fill, stroke, stroke_width ):
if fill:
self._write_polygon_filled(
points, layer, stroke_width
)
# Polygons with a fill and stroke are drawn with the filled polygon
# above:
if stroke and not fill:
self._write_polygon_outline(
points, layer, stroke_width
)
#------------------------------------------------------------------------
def _write_polygon_footer( self, layer, stroke_width ):
self.output_file.write(
" )\n (layer {})\n (width {})\n )".format(
layer, stroke_width
)
)
#------------------------------------------------------------------------
def _write_polygon_header( self, points, layer ):
self.output_file.write( "\n (fp_poly\n (pts \n" )
#------------------------------------------------------------------------
def _write_polygon_point( self, point ):
self.output_file.write(
" (xy {} {})\n".format( point.x, point.y )
)
#------------------------------------------------------------------------
def _write_polygon_segment( self, p, q, layer, stroke_width ):
self.output_file.write(
"""\n (fp_line
(start {} {})
(end {} {})
(layer {})
(width {})
)""".format(
p.x, p.y,
q.x, q.y,
layer,
stroke_width,
)
)
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
def get_arguments():
parser = argparse.ArgumentParser(
description = (
'Convert Inkscape SVG drawings to KiCad footprint modules.'
)
)
#------------------------------------------------------------------------
parser.add_argument(
'-i', '--input-file',
type = str,
dest = 'input_file_name',
metavar = 'FILENAME',
help = "name of the SVG file",
required = True,
)
parser.add_argument(
'-o', '--output-file',
type = str,
dest = 'output_file_name',
metavar = 'FILENAME',
help = "name of the module file",
)
parser.add_argument(
'--name', '--module-name',
type = str,
dest = 'module_name',
metavar = 'NAME',
help = "base name of the module",
default = "svg2mod",
)
parser.add_argument(
'--value', '--module-value',
type = str,
dest = 'module_value',
metavar = 'VALUE',
help = "value of the module",
default = "G***",
)
parser.add_argument(
'-f', '--factor',
type = float,
dest = 'scale_factor',
metavar = 'FACTOR',
help = "scale paths by this factor",
default = 1.0,
)
parser.add_argument(
'-p', '--precision',
type = float,
dest = 'precision',
metavar = 'PRECISION',
help = "smoothness for approximating curves with line segments (float)",
default = 10.0,
)
parser.add_argument(
'--front-only',
dest = 'front_only',
action = 'store_const',
const = True,
help = "omit output of back module (legacy output format)",
default = False,
)
parser.add_argument(
'--format',
type = str,
dest = 'format',
metavar = 'FORMAT',
choices = [ 'legacy', 'pretty' ],
help = "output module file format (legacy|pretty)",
default = 'pretty',
)
parser.add_argument(
'--units',
type = str,
dest = 'units',
metavar = 'UNITS',
choices = [ 'decimil', 'mm' ],
help = "output units, if output format is legacy (decimil|mm)",
default = 'mm',
)
parser.add_argument(
'-d', '--dpi',
type = int,
dest = 'dpi',
metavar = 'DPI',
help = "DPI of the SVG file (int)",
default = DEFAULT_DPI,
)
return parser.parse_args(), parser
#------------------------------------------------------------------------
#----------------------------------------------------------------------------
main()
#----------------------------------------------------------------------------
# vi: set et sts=4 sw=4 ts=4:
| 27.135709 | 97 | 0.43131 |
4a274e454f34759f91e9b97175b1709948e54e7e | 1,239 | py | Python | contracts/contract_account.py | fabrice102/create-asa-with-ssc | 616025599b54cc3cff94f5abb012535c7eebcf03 | [
"MIT"
] | 5 | 2021-06-17T18:17:52.000Z | 2021-07-18T07:58:32.000Z | contracts/contract_account.py | fabrice102/create-asa-with-ssc | 616025599b54cc3cff94f5abb012535c7eebcf03 | [
"MIT"
] | null | null | null | contracts/contract_account.py | fabrice102/create-asa-with-ssc | 616025599b54cc3cff94f5abb012535c7eebcf03 | [
"MIT"
] | null | null | null | import sys
from pyteal import *
def contract_account(app_id):
asset_close_to_check = Txn.asset_close_to() == Global.zero_address()
rekey_check = Txn.rekey_to() == Global.zero_address()
linked_with_app_call = And(
Gtxn[0].type_enum() == TxnType.ApplicationCall,
Gtxn[0].application_id() == Int(app_id)
)
fee_check = Txn.fee() <= Int(1000)
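    # the escrow transaction must sit at group index 1, paired with a call to
    # the controlling application at group index 0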
# create asa from escrow
on_create_asa = Txn.type_enum() == TxnType.AssetConfig
# fund 1 asa that has been created by escrow
on_fund_asa = Seq([
Assert(Txn.type_enum() == TxnType.AssetTransfer),
Assert(Txn.asset_sender() == Global.zero_address()),
Assert(asset_close_to_check),
Assert(Txn.asset_amount() == Int(1)),
Int(1)
])
return Seq([
Assert(Txn.group_index() == Int(1)),
Assert(linked_with_app_call),
Assert(rekey_check),
Assert(fee_check),
Cond(
[Gtxn[0].application_args[0] == Bytes("create_asa"), on_create_asa],
[Gtxn[0].application_args[0] == Bytes("fund_asa"), on_fund_asa]
)
])
if __name__ == "__main__":
arg = int(sys.argv[1])
print(compileTeal(contract_account(arg), Mode.Signature, version=3))
| 29.5 | 80 | 0.625504 |
4a274e9f499232b325635a993f9a963c9d2c20e9 | 7,750 | py | Python | monitor-app-route-report-csv.py | ccie29824/viptela_application-route-policy | 610f3a054a1bf5fae691845b7bc0d410609dc38e | [
"BSD-3-Clause"
] | 6 | 2020-05-31T06:37:31.000Z | 2020-11-18T03:14:38.000Z | monitor-app-route-report-csv.py | ccie29824/viptela_application-route-policy | 610f3a054a1bf5fae691845b7bc0d410609dc38e | [
"BSD-3-Clause"
] | null | null | null | monitor-app-route-report-csv.py | ccie29824/viptela_application-route-policy | 610f3a054a1bf5fae691845b7bc0d410609dc38e | [
"BSD-3-Clause"
] | 4 | 2020-05-13T06:19:07.000Z | 2021-03-08T03:55:52.000Z | import requests
import sys
import json
import os
import time
import logging
import tabulate
import yaml
from logging.handlers import TimedRotatingFileHandler
requests.packages.urllib3.disable_warnings()
from requests.packages.urllib3.exceptions import InsecureRequestWarning
def get_logger(logfile, level):
'''
Create a logger
'''
if logfile is not None:
'''
Create the log directory if it doesn't exist
'''
fldr = os.path.dirname(logfile)
if not os.path.exists(fldr):
os.makedirs(fldr)
logger = logging.getLogger()
logger.setLevel(level)
log_format = '%(asctime)s | %(levelname)-8s | %(funcName)-20s | %(lineno)-3d | %(message)s'
formatter = logging.Formatter(log_format)
file_handler = TimedRotatingFileHandler(logfile, when='midnight', backupCount=7)
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
return None
class Authentication:
@staticmethod
def get_jsessionid(vmanage_host, vmanage_port, username, password):
api = "/j_security_check"
base_url = "https://%s:%s"%(vmanage_host, vmanage_port)
url = base_url + api
payload = {'j_username' : username, 'j_password' : password}
response = requests.post(url=url, data=payload, verify=False)
try:
cookies = response.headers["Set-Cookie"]
jsessionid = cookies.split(";")
return(jsessionid[0])
except:
if logger is not None:
logger.error("No valid JSESSION ID returned\n")
exit()
@staticmethod
def get_token(vmanage_host, vmanage_port, jsessionid):
headers = {'Cookie': jsessionid}
base_url = "https://%s:%s"%(vmanage_host, vmanage_port)
api = "/dataservice/client/token"
url = base_url + api
response = requests.get(url=url, headers=headers, verify=False)
if response.status_code == 200:
return(response.text)
else:
return None
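# Main flow: read vManage credentials from vmanage_login.yaml, authenticate
# (JSESSIONID cookie plus, when the API returns one, an X-XSRF-TOKEN header),
# query the app-route aggregation endpoint for the two routers entered
# interactively, then print the per-tunnel averages and write them to a dated
# CSV file.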
if __name__ == '__main__':
try:
log_level = logging.DEBUG
logger = get_logger("log/app_route_report.txt", log_level)
if logger is not None:
logger.info("Loading vManage login details from YAML\n")
with open("vmanage_login.yaml") as f:
config = yaml.safe_load(f.read())
vmanage_host = config["vmanage_host"]
vmanage_port = config["vmanage_port"]
username = config["vmanage_username"]
password = config["vmanage_password"]
rtr1_systemip = input("Enter Router-1 System IP address : ")
rtr2_systemip = input("Enter Router-2 System IP address : ")
Auth = Authentication()
jsessionid = Auth.get_jsessionid(vmanage_host,vmanage_port,username,password)
token = Auth.get_token(vmanage_host,vmanage_port,jsessionid)
if token is not None:
headers = {'Content-Type': "application/json",'Cookie': jsessionid, 'X-XSRF-TOKEN': token}
else:
headers = {'Content-Type': "application/json",'Cookie': jsessionid}
base_url = "https://%s:%s/dataservice"%(vmanage_host,vmanage_port)
# Get app route statistics for tunnels between router-1(rtr1_systemip) and router-2(rtr2_systemip)
api_url = "/statistics/approute/fec/aggregation"
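        # The query below uses a hard-coded entry_time window (29 Feb - 31 Mar
        # 2020) and filters on the local/remote system IPs entered above;
        # results are grouped by tunnel name and protocol in 24-hour buckets,
        # averaging latency, jitter, loss percentage and vQoE score.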
payload = {
"query": {
"condition": "AND",
"rules": [
{
"value": [
"2020-02-29T08:00:00 UTC",
"2020-03-31T07:00:00 UTC"
],
"field": "entry_time",
"type": "date",
"operator": "between"
},
{
"value": [
rtr1_systemip
],
"field": "local_system_ip",
"type": "string",
"operator": "in"
},
{
"value": [
rtr2_systemip
],
"field": "remote_system_ip",
"type": "string",
"operator": "in"
}
]
},
"aggregation": {
"field": [
{
"property": "name",
"sequence": 1
},
{
"property": "proto",
"sequence": 2
}
],
"histogram": {
"property": "entry_time",
"type": "hour",
"interval": 24,
"order": "asc"
},
"metrics": [
{
"property": "latency",
"type": "avg"
},
{
"property": "jitter",
"type": "avg"
},
{
"property": "loss_percentage",
"type": "avg"
},
{
"property": "vqoe_score",
"type": "avg"
}
]
}
}
url = base_url + api_url
response = requests.post(url=url, headers=headers, data=json.dumps(payload), verify=False)
if response.status_code == 200:
app_route_stats = response.json()["data"]
app_route_stats_headers = ["Date", "Tunnel name", "vQoE score", "Latency", "Loss percentage", "Jitter"]
table = list()
print("\nAverage App route statistics between %s and %s for last 30 days\n"%(rtr1_systemip,rtr2_systemip))
for item in app_route_stats:
tr = [time.strftime('%m/%d/%Y', time.gmtime(item['entry_time']/1000.)), item['name'], item['vqoe_score'], item['latency'], item['loss_percentage'], item['jitter']]
table.append(tr)
try:
print(tabulate.tabulate(table, app_route_stats_headers, tablefmt="fancy_grid"))
csv_content = tabulate.tabulate(table, app_route_stats_headers, tablefmt="csv")
file_name = open("Tunnel Statistics %s.csv"%time.strftime("%Y-%m-%d"),"w")
file_name.write(csv_content)
file_name.close()
except UnicodeEncodeError:
print(tabulate.tabulate(table, app_route_stats_headers, tablefmt="grid"))
else:
if logger is not None:
logger.error("Failed to retrieve app route statistics\n")
except Exception as e:
print('Failed due to error',str(e))
| 37.259615 | 180 | 0.454194 |
4a27500b6976d0a3ae283165b4b304c2596c149d | 4,059 | py | Python | benchmark/startCirq1426.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq1426.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq1426.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=55
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
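    """Build the benchmark circuit on the qubits in ``input_qubit`` and append
    a final measurement of every qubit under the key 'result'."""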
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[3])) # number=51
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=31
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=32
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[1])) # number=44
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=45
c.append(cirq.H.on(input_qubit[1])) # number=46
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=52
c.append(cirq.X.on(input_qubit[1])) # number=53
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=54
c.append(cirq.H.on(input_qubit[1])) # number=48
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=49
c.append(cirq.H.on(input_qubit[1])) # number=50
c.append(cirq.X.on(input_qubit[0])) # number=26
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=37
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=38
c.append(cirq.H.on(input_qubit[1])) # number=39
c.append(cirq.X.on(input_qubit[1])) # number=35
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=36
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=43
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=47
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[1])) # number=29
c.append(cirq.Y.on(input_qubit[4])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1426.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 39.028846 | 77 | 0.646465 |
4a27506c3f62de61587af4f428faaa37a1a38dc8 | 3,567 | py | Python | system/t04_mirror/edit.py | masselstine/aptly | 3d2ccf1b2a02ea45eb0fa4fa7180b1809cb4590d | [
"MIT"
] | 1,035 | 2015-01-05T00:37:30.000Z | 2018-04-17T17:17:20.000Z | system/t04_mirror/edit.py | utopia-repository/aptly | 799462acc46628f85ab1539c8866f864216edee5 | [
"MIT"
] | 502 | 2015-01-05T09:45:29.000Z | 2018-04-18T01:37:33.000Z | system/t04_mirror/edit.py | utopia-repository/aptly | 799462acc46628f85ab1539c8866f864216edee5 | [
"MIT"
] | 198 | 2015-01-02T03:08:20.000Z | 2018-04-16T22:16:51.000Z | import re
from lib import BaseTest
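# Functional tests for "aptly mirror edit": each class defines its fixtures and
# the command to run (runCmd), sets expectedCode when the command should fail,
# and, where defined, check() re-runs "aptly mirror show" and compares the
# output (presumably against a stored gold file) with timestamps stripped.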
class EditMirror1Test(BaseTest):
"""
edit mirror: enable filter & download sources
"""
fixtureDB = True
runCmd = "aptly mirror edit -filter=nginx -filter-with-deps -with-sources wheezy-main"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show wheezy-main", "mirror_show", match_prepare=lambda s: re.sub(r"Last update: [0-9:+A-Za-z -]+\n", "", s))
class EditMirror2Test(BaseTest):
"""
edit mirror: missing mirror
"""
runCmd = "aptly mirror edit wheezy-main"
expectedCode = 1
class EditMirror3Test(BaseTest):
"""
edit mirror: no changes
"""
fixtureDB = True
runCmd = "aptly mirror edit wheezy-main"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show wheezy-main", "mirror_show", match_prepare=lambda s: re.sub(r"Last update: [0-9:+A-Za-z -]+\n", "", s))
class EditMirror4Test(BaseTest):
"""
edit mirror: wrong query
"""
fixtureDB = True
runCmd = "aptly mirror edit -filter=| wheezy-main"
expectedCode = 1
class EditMirror5Test(BaseTest):
"""
edit mirror: remove filter
"""
fixtureCmds = [
"aptly mirror create -ignore-signatures -filter='nginx | Priority (required)' mirror5 http://security.debian.org/ stretch/updates main",
]
runCmd = "aptly mirror edit -filter= mirror5"
def check(self):
def removeDates(s):
return re.sub(r"(Date|Valid-Until): [,0-9:+A-Za-z -]+\n", "", s)
self.check_output()
self.check_cmd_output("aptly mirror show mirror5", "mirror_show", match_prepare=removeDates)
class EditMirror6Test(BaseTest):
"""
edit mirror: change architectures
"""
fixtureCmds = [
"aptly mirror create -ignore-signatures -architectures=amd64 mirror6 http://mirror.yandex.ru/debian stretch main"
]
runCmd = "aptly mirror edit -ignore-signatures -architectures=amd64,i386 mirror6"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show mirror6", "mirror_show", match_prepare=lambda s: re.sub(r"Last update: [0-9:+A-Za-z -]+\n", "", s))
class EditMirror7Test(BaseTest):
"""
edit mirror: change architectures to missing archs
"""
fixtureCmds = [
"aptly mirror create -ignore-signatures -architectures=amd64 stretch http://mirror.yandex.ru/debian stretch main"
]
runCmd = "aptly mirror edit -ignore-signatures -architectures=amd64,x56 stretch"
expectedCode = 1
class EditMirror8Test(BaseTest):
"""
edit mirror: enable udebs
"""
fixtureDB = True
runCmd = "aptly mirror edit -with-udebs wheezy-main"
def check(self):
self.check_output()
self.check_cmd_output("aptly mirror show wheezy-main", "mirror_show", match_prepare=lambda s: re.sub(r"Last update: [0-9:+A-Za-z -]+\n", "", s))
class EditMirror9Test(BaseTest):
"""
edit mirror: flat mirror with udebs
"""
fixtureCmds = ["aptly mirror create -keyring=aptlytest.gpg mirror9 http://pkg.jenkins-ci.org/debian-stable binary/"]
fixtureGpg = True
runCmd = "aptly mirror edit -with-udebs mirror9"
expectedCode = 1
class EditMirror10Test(BaseTest):
"""
edit mirror: change archive url
"""
requiresFTP = True
fixtureCmds = ["aptly mirror create -ignore-signatures mirror10 ftp://ftp.ru.debian.org/debian stretch main"]
runCmd = "aptly mirror edit -ignore-signatures -archive-url ftp://ftp.ch.debian.org/debian mirror10"
| 30.487179 | 152 | 0.662461 |
4a2750a66ec41830ed5b9b3e040f7b708af5c733 | 70,391 | py | Python | sympy/tensor/tests/test_tensor.py | giorgosgiapis/sympy | 6128b47aa008657715ce5c70b7c3fdd6bd3f1648 | [
"BSD-3-Clause"
] | null | null | null | sympy/tensor/tests/test_tensor.py | giorgosgiapis/sympy | 6128b47aa008657715ce5c70b7c3fdd6bd3f1648 | [
"BSD-3-Clause"
] | null | null | null | sympy/tensor/tests/test_tensor.py | giorgosgiapis/sympy | 6128b47aa008657715ce5c70b7c3fdd6bd3f1648 | [
"BSD-3-Clause"
] | null | null | null | from functools import wraps
from sympy import Matrix, eye, Integer, expand, Indexed, Sum
from sympy.combinatorics import Permutation
from sympy.core import S, Rational, Symbol, Basic, Add
from sympy.core.containers import Tuple
from sympy.core.symbol import symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.printing.pretty.pretty import pretty
from sympy.tensor.array import Array
from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, \
get_symmetric_group_sgs, TensorIndex, tensor_mul, TensAdd, \
riemann_cyclic_replace, riemann_cyclic, TensMul, tensor_heads, \
TensorManager, TensExpr, TensorHead, canon_bp, \
tensorhead, tensorsymmetry, TensorType, substitute_indices
from sympy.utilities.pytest import raises, XFAIL, warns_deprecated_sympy, ignore_warnings
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.compatibility import range
from sympy.matrices import diag
def filter_warnings_decorator(f):
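    """Wrap a test so that it runs with SymPyDeprecationWarning suppressed."""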
@wraps(f)
def wrapper():
with ignore_warnings(SymPyDeprecationWarning):
f()
return wrapper
def _is_equal(arg1, arg2):
if isinstance(arg1, TensExpr):
return arg1.equals(arg2)
elif isinstance(arg2, TensExpr):
return arg2.equals(arg1)
return arg1 == arg2
#################### Tests from tensor_can.py #######################
def test_canonicalize_no_slot_sym():
# A_d0 * B^d0; T_c = A^d0*B_d0
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, d0, d1 = tensor_indices('a,b,d0,d1', Lorentz)
A, B = tensor_heads('A,B', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(-d0)*B(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*B(-L_0)'
# A^a * B^b; T_c = T
t = A(a)*B(b)
tc = t.canon_bp()
assert tc == t
# B^b * A^a
t1 = B(b)*A(a)
tc = t1.canon_bp()
assert str(tc) == 'A(a)*B(b)'
# A symmetric
# A^{b}_{d0}*A^{d0, a}; T_c = A^{a d0}*A{b}_{d0}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(b, -d0)*A(d0, a)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0)*A(b, -L_0)'
# A^{d1}_{d0}*B^d0*C_d1
# T_c = A^{d0 d1}*B_d0*C_d1
B, C = tensor_heads('B,C', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(d1, -d0)*B(d0)*C(-d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_0)*C(-L_1)'
# A without symmetry
# A^{d1}_{d0}*B^d0*C_d1 ord=[d0,-d0,d1,-d1]; g = [2,1,0,3,4,5]
# T_c = A^{d0 d1}*B_d1*C_d0; can = [0,2,3,1,4,5]
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*B(d0)*C(-d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_1)*C(-L_0)'
# A, B without symmetry
# A^{d1}_{d0}*B_{d1}^{d0}
# T_c = A^{d0 d1}*B_{d0 d1}
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*B(-d1, d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_0, -L_1)'
# A_{d0}^{d1}*B_{d1}^{d0}
# T_c = A^{d0 d1}*B_{d1 d0}
t = A(-d0, d1)*B(-d1, d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-L_1, -L_0)'
# A, B, C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b}
# T_c=A^{d0 d1}*B_{a d1}*C_{d0 b}
C = TensorHead('C', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_1)*C(-L_0, -b)'
# A symmetric, B and C without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b}
# T_c = A^{d0 d1}*B_{a d0}*C_{d1 b}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-L_1, -b)'
# A and C symmetric, B without symmetry
# A^{d1 d0}*B_{a d0}*C_{d1 b} ord=[a,b,d0,-d0,d1,-d1]
# T_c = A^{d0 d1}*B_{a d0}*C_{b d1}
C = TensorHead('C', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d1, d0)*B(-a, -d0)*C(-d1, -b)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1)*B(-a, -L_0)*C(-b, -L_1)'
def test_canonicalize_no_dummies():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a, b, c, d', Lorentz)
# A commuting
# A^c A^b A^a
# T_c = A^a A^b A^c
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(c)*A(b)*A(a)
tc = t.canon_bp()
assert str(tc) == 'A(a)*A(b)*A(c)'
# A anticommuting
# A^c A^b A^a
# T_c = -A^a A^b A^c
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1)
t = A(c)*A(b)*A(a)
tc = t.canon_bp()
assert str(tc) == '-A(a)*A(b)*A(c)'
# A commuting and symmetric
# A^{b,d}*A^{c,a}
# T_c = A^{a c}*A^{b d}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(b, d)*A(c, a)
tc = t.canon_bp()
assert str(tc) == 'A(a, c)*A(b, d)'
# A anticommuting and symmetric
# A^{b,d}*A^{c,a}
# T_c = -A^{a c}*A^{b d}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2), 1)
t = A(b, d)*A(c, a)
tc = t.canon_bp()
assert str(tc) == '-A(a, c)*A(b, d)'
# A^{c,a}*A^{b,d}
# T_c = A^{a c}*A^{b d}
t = A(c, a)*A(b, d)
tc = t.canon_bp()
assert str(tc) == 'A(a, c)*A(b, d)'
def test_tensorhead_construction_without_symmetry():
L = TensorIndexType('Lorentz')
A1 = TensorHead('A', [L, L])
A2 = TensorHead('A', [L, L], TensorSymmetry.no_symmetry(2))
assert A1 == A2
A3 = TensorHead('A', [L, L], TensorSymmetry.fully_symmetric(2)) # Symmetric
assert A1 != A3
def test_no_metric_symmetry():
# no metric symmetry; A no symmetry
# A^d1_d0 * A^d0_d1
# T_c = A^d0_d1 * A^d1_d0
Lorentz = TensorIndexType('Lorentz', dummy_name='L', metric_symmetry=0)
d0, d1, d2, d3 = tensor_indices('d:4', Lorentz)
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2))
t = A(d1, -d0)*A(d0, -d1)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)'
# A^d1_d2 * A^d0_d3 * A^d2_d1 * A^d3_d0
# T_c = A^d0_d1 * A^d1_d0 * A^d2_d3 * A^d3_d2
t = A(d1, -d2)*A(d0, -d3)*A(d2, -d1)*A(d3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_0)*A(L_2, -L_3)*A(L_3, -L_2)'
# A^d0_d2 * A^d1_d3 * A^d3_d0 * A^d2_d1
# T_c = A^d0_d1 * A^d1_d2 * A^d2_d3 * A^d3_d0
t = A(d0, -d1)*A(d1, -d2)*A(d2, -d3)*A(d3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, -L_1)*A(L_1, -L_2)*A(L_2, -L_3)*A(L_3, -L_0)'
def test_canonicalize1():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Lorentz)
# A_d0*A^d0; ord = [d0,-d0]
# T_c = A^d0*A_d0
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1))
t = A(-d0)*A(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*A(-L_0)'
# A commuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
# T_c = A^d0*A_d0*A^d1*A_d1*A^d2*A_d2
t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
tc = t.canon_bp()
assert str(tc) == 'A(L_0)*A(-L_0)*A(L_1)*A(-L_1)*A(L_2)*A(-L_2)'
# A anticommuting
# A_d0*A_d1*A_d2*A^d2*A^d1*A^d0
# T_c 0
A = TensorHead('A', [Lorentz], TensorSymmetry.no_symmetry(1), 1)
t = A(-d0)*A(-d1)*A(-d2)*A(d2)*A(d1)*A(d0)
tc = t.canon_bp()
assert tc == 0
# A commuting symmetric
# A^{d0 b}*A^a_d1*A^d1_d0
# T_c = A^{a d0}*A^{b d1}*A_{d0 d1}
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d0, b)*A(a, -d1)*A(d1, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0)*A(b, L_1)*A(-L_0, -L_1)'
# A, B commuting symmetric
# A^{d0 b}*A^d1_d0*B^a_d1
# T_c = A^{b d0}*A_d0^d1*B^a_d1
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(d0, b)*A(d1, -d0)*B(a, -d1)
tc = t.canon_bp()
assert str(tc) == 'A(b, L_0)*A(-L_0, L_1)*B(a, -L_1)'
# A commuting symmetric
# A^{d1 d0 b}*A^{a}_{d1 d0}; ord=[a,b, d0,-d0,d1,-d1]
# T_c = A^{a d0 d1}*A^{b}_{d0 d1}
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3))
t = A(d1, d0, b)*A(a, -d1, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a, L_0, L_1)*A(b, -L_0, -L_1)'
# A^{d3 d0 d2}*A^a0_{d1 d2}*A^d1_d3^a1*A^{a2 a3}_d0
# T_c = A^{a0 d0 d1}*A^a1_d0^d2*A^{a2 a3 d3}*A_{d1 d2 d3}
t = A(d3, d0, d2)*A(a0, -d1, -d2)*A(d1, -d3, a1)*A(a2, a3, -d0)
tc = t.canon_bp()
assert str(tc) == 'A(a0, L_0, L_1)*A(a1, -L_0, L_2)*A(a2, a3, L_3)*A(-L_1, -L_2, -L_3)'
# A commuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
    # in this example and in the next three,
# renaming dummy indices and using symmetry of A,
# T = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
# can = 0
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3))
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert tc == 0
# A anticommuting symmetric, B antisymmetric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == 'A(L_0, L_1, L_2)*A(-L_0, -L_1, L_3)*B(-L_2, -L_3)'
# A anticommuting symmetric, B antisymmetric commuting, antisymmetric metric
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = -A^{d0 d1 d2} * A_{d0 d1}^d3 * B_{d2 d3}
Spinor = TensorIndexType('Spinor', dummy_name='S', metric_symmetry=-1)
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Spinor)
A = TensorHead('A', [Spinor]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Spinor]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == '-A(S_0, S_1, S_2)*A(-S_0, -S_1, S_3)*B(-S_2, -S_3)'
# A anticommuting symmetric, B antisymmetric anticommuting,
# no metric symmetry
# A^{d0 d1 d2} * A_{d2 d3 d1} * B_d0^d3
# T_c = A^{d0 d1 d2} * A_{d0 d1 d3} * B_d2^d3
Mat = TensorIndexType('Mat', metric_symmetry=0, dummy_name='M')
a, a0, a1, a2, a3, b, d0, d1, d2, d3 = \
tensor_indices('a,a0,a1,a2,a3,b,d0,d1,d2,d3', Mat)
A = TensorHead('A', [Mat]*3, TensorSymmetry.fully_symmetric(3), 1)
B = TensorHead('B', [Mat]*2, TensorSymmetry.fully_symmetric(-2))
t = A(d0, d1, d2)*A(-d2, -d3, -d1)*B(-d0, d3)
tc = t.canon_bp()
assert str(tc) == 'A(M_0, M_1, M_2)*A(-M_0, -M_1, -M_3)*B(-M_2, M_3)'
# Gamma anticommuting
# Gamma_{mu nu} * gamma^rho * Gamma^{nu mu alpha}
# T_c = -Gamma^{mu nu} * gamma^rho * Gamma_{alpha mu nu}
alpha, beta, gamma, mu, nu, rho = \
tensor_indices('alpha,beta,gamma,mu,nu,rho', Lorentz)
Gamma = TensorHead('Gamma', [Lorentz],
TensorSymmetry.fully_symmetric(1), 2)
Gamma2 = TensorHead('Gamma', [Lorentz]*2,
TensorSymmetry.fully_symmetric(-2), 2)
Gamma3 = TensorHead('Gamma', [Lorentz]*3,
TensorSymmetry.fully_symmetric(-3), 2)
t = Gamma2(-mu, -nu)*Gamma(rho)*Gamma3(nu, mu, alpha)
tc = t.canon_bp()
assert str(tc) == '-Gamma(L_0, L_1)*Gamma(rho)*Gamma(alpha, -L_0, -L_1)'
# Gamma_{mu nu} * Gamma^{gamma beta} * gamma_rho * Gamma^{nu mu alpha}
# T_c = Gamma^{mu nu} * Gamma^{beta gamma} * gamma_rho * Gamma^alpha_{mu nu}
t = Gamma2(mu, nu)*Gamma2(beta, gamma)*Gamma(-rho)*Gamma3(alpha, -mu, -nu)
tc = t.canon_bp()
assert str(tc) == 'Gamma(L_0, L_1)*Gamma(beta, gamma)*Gamma(-rho)*Gamma(alpha, -L_0, -L_1)'
# f^a_{b,c} antisymmetric in b,c; A_mu^a no symmetry
# f^c_{d a} * f_{c e b} * A_mu^d * A_nu^a * A^{nu e} * A^{mu b}
# g = [8,11,5, 9,13,7, 1,10, 3,4, 2,12, 0,6, 14,15]
# T_c = -f^{a b c} * f_a^{d e} * A^mu_b * A_{mu d} * A^nu_c * A_{nu e}
Flavor = TensorIndexType('Flavor', dummy_name='F')
a, b, c, d, e, ff = tensor_indices('a,b,c,d,e,f', Flavor)
mu, nu = tensor_indices('mu,nu', Lorentz)
f = TensorHead('f', [Flavor]*3, TensorSymmetry.direct_product(1, -2))
A = TensorHead('A', [Lorentz, Flavor], TensorSymmetry.no_symmetry(2))
t = f(c, -d, -a)*f(-c, -e, -b)*A(-mu, d)*A(-nu, a)*A(nu, e)*A(mu, b)
tc = t.canon_bp()
assert str(tc) == '-f(F_0, F_1, F_2)*f(-F_0, F_3, F_4)*A(L_0, -F_1)*A(-L_0, -F_3)*A(L_1, -F_2)*A(-L_1, -F_4)'
def test_bug_correction_tensor_indices():
# to make sure that tensor_indices does not return a list if creating
# only one index:
A = TensorIndexType("A")
i = tensor_indices('i', A)
assert not isinstance(i, (tuple, list))
assert isinstance(i, TensorIndex)
def test_riemann_invariants():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11 = \
tensor_indices('d0:12', Lorentz)
# R^{d0 d1}_{d1 d0}; ord = [d0,-d0,d1,-d1]
# T_c = -R^{d0 d1}_{d0 d1}
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(d0, d1, -d1, -d0)
tc = t.canon_bp()
assert str(tc) == '-R(L_0, L_1, -L_0, -L_1)'
# R_d11^d1_d0^d5 * R^{d6 d4 d0}_d5 * R_{d7 d2 d8 d9} *
# R_{d10 d3 d6 d4} * R^{d2 d7 d11}_d1 * R^{d8 d9 d3 d10}
# can = [0,2,4,6, 1,3,8,10, 5,7,12,14, 9,11,16,18, 13,15,20,22,
    #        17,19,21,23, 24,25]
# T_c = R^{d0 d1 d2 d3} * R_{d0 d1}^{d4 d5} * R_{d2 d3}^{d6 d7} *
# R_{d4 d5}^{d8 d9} * R_{d6 d7}^{d10 d11} * R_{d8 d9 d10 d11}
t = R(-d11,d1,-d0,d5)*R(d6,d4,d0,-d5)*R(-d7,-d2,-d8,-d9)* \
R(-d10,-d3,-d6,-d4)*R(d2,d7,d11,-d1)*R(d8,d9,d3,d10)
tc = t.canon_bp()
assert str(tc) == 'R(L_0, L_1, L_2, L_3)*R(-L_0, -L_1, L_4, L_5)*R(-L_2, -L_3, L_6, L_7)*R(-L_4, -L_5, L_8, L_9)*R(-L_6, -L_7, L_10, L_11)*R(-L_8, -L_9, -L_10, -L_11)'
def test_riemann_products():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
d0, d1, d2, d3, d4, d5, d6 = tensor_indices('d0:7', Lorentz)
a0, a1, a2, a3, a4, a5 = tensor_indices('a0:6', Lorentz)
a, b = tensor_indices('a,b', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
# R^{a b d0}_d0 = 0
t = R(a, b, d0, -d0)
tc = t.canon_bp()
assert tc == 0
# R^{d0 b a}_d0
# T_c = -R^{a d0 b}_d0
t = R(d0, b, a, -d0)
tc = t.canon_bp()
assert str(tc) == '-R(a, L_0, b, -L_0)'
# R^d1_d2^b_d0 * R^{d0 a}_d1^d2; ord=[a,b,d0,-d0,d1,-d1,d2,-d2]
# T_c = -R^{a d0 d1 d2}* R^b_{d0 d1 d2}
t = R(d1, -d2, b, -d0)*R(d0, a, -d1, d2)
tc = t.canon_bp()
assert str(tc) == '-R(a, L_0, L_1, L_2)*R(b, -L_0, -L_1, -L_2)'
# A symmetric commuting
# R^{d6 d5}_d2^d1 * R^{d4 d0 d2 d3} * A_{d6 d0} A_{d3 d1} * A_{d4 d5}
# g = [12,10,5,2, 8,0,4,6, 13,1, 7,3, 9,11,14,15]
# T_c = -R^{d0 d1 d2 d3} * R_d0^{d4 d5 d6} * A_{d1 d4}*A_{d2 d5}*A_{d3 d6}
V = TensorHead('V', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = R(d6, d5, -d2, d1)*R(d4, d0, d2, d3)*V(-d6, -d0)*V(-d3, -d1)*V(-d4, -d5)
tc = t.canon_bp()
assert str(tc) == '-R(L_0, L_1, L_2, L_3)*R(-L_0, L_4, L_5, L_6)*V(-L_1, -L_4)*V(-L_2, -L_5)*V(-L_3, -L_6)'
# R^{d2 a0 a2 d0} * R^d1_d2^{a1 a3} * R^{a4 a5}_{d0 d1}
# T_c = R^{a0 d0 a2 d1}*R^{a1 a3}_d0^d2*R^{a4 a5}_{d1 d2}
t = R(d2, a0, a2, d0)*R(d1, -d2, a1, a3)*R(a4, a5, -d0, -d1)
tc = t.canon_bp()
assert str(tc) == 'R(a0, L_0, a2, L_1)*R(a1, a3, -L_0, L_2)*R(a4, a5, -L_1, -L_2)'
######################################################################
def test_canonicalize2():
D = Symbol('D')
Eucl = TensorIndexType('Eucl', metric_symmetry=1, dim=D, dummy_name='E')
i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14 = \
tensor_indices('i0:15', Eucl)
A = TensorHead('A', [Eucl]*3, TensorSymmetry.fully_symmetric(-3))
# two examples from Cvitanovic, Group Theory page 59
# of identities for antisymmetric tensors of rank 3
# contracted according to the Kuratowski graph eq.(6.59)
t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i3,i7,i5)*A(-i2,-i5,i6)*A(-i4,-i6,i8)
t1 = t.canon_bp()
assert t1 == 0
# eq.(6.60)
#t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*
    #     A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i9,-i12,i14)
t = A(i0,i1,i2)*A(-i1,i3,i4)*A(-i2,i5,i6)*A(-i3,i7,i8)*A(-i6,-i7,i9)*\
A(-i8,i10,i13)*A(-i5,-i10,i11)*A(-i4,-i11,i12)*A(-i9,-i12,i14)
t1 = t.canon_bp()
assert t1 == 0
def test_canonicalize3():
D = Symbol('D')
Spinor = TensorIndexType('Spinor', dim=D, metric_symmetry=-1, dummy_name='S')
a0,a1,a2,a3,a4 = tensor_indices('a0:5', Spinor)
chi, psi = tensor_heads('chi,psi', [Spinor], TensorSymmetry.no_symmetry(1), 1)
t = chi(a1)*psi(a0)
t1 = t.canon_bp()
assert t1 == t
t = psi(a1)*chi(a0)
t1 = t.canon_bp()
assert t1 == -chi(a0)*psi(a1)
def test_TensorIndexType():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', metric_name='g', metric_symmetry=1,
dim=D, dummy_name='L')
m0, m1, m2, m3, m4 = tensor_indices('m0:5', Lorentz)
sym2 = TensorSymmetry.fully_symmetric(2)
sym2n = TensorSymmetry(*get_symmetric_group_sgs(2))
assert sym2 == sym2n
g = Lorentz.metric
assert str(g) == 'g(Lorentz,Lorentz)'
assert Lorentz.eps_dim == Lorentz.dim
TSpace = TensorIndexType('TSpace', dummy_name = 'TSpace')
i0, i1 = tensor_indices('i0 i1', TSpace)
g = TSpace.metric
A = TensorHead('A', [TSpace]*2, sym2)
assert str(A(i0,-i0).canon_bp()) == 'A(TSpace_0, -TSpace_0)'
def test_indices():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
assert a.tensor_index_type == Lorentz
assert a != -a
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(a,b)*B(-b,c)
indices = t.get_indices()
L_0 = TensorIndex('L_0', Lorentz)
assert indices == [a, L_0, -L_0, c]
raises(ValueError, lambda: tensor_indices(3, Lorentz))
raises(ValueError, lambda: A(a,b,c))
def test_TensorSymmetry():
assert TensorSymmetry.fully_symmetric(2) == \
TensorSymmetry(get_symmetric_group_sgs(2))
assert TensorSymmetry.fully_symmetric(-3) == \
TensorSymmetry(get_symmetric_group_sgs(3, True))
assert TensorSymmetry.direct_product(-4) == \
TensorSymmetry.fully_symmetric(-4)
assert TensorSymmetry.fully_symmetric(-1) == \
TensorSymmetry.fully_symmetric(1)
assert TensorSymmetry.direct_product(1, -1, 1) == \
TensorSymmetry.no_symmetry(3)
assert TensorSymmetry(get_symmetric_group_sgs(2)) == \
TensorSymmetry(*get_symmetric_group_sgs(2))
# TODO: add check for *get_symmetric_group_sgs(0)
sym = TensorSymmetry.fully_symmetric(-3)
assert sym.rank == 3
assert sym.base == Tuple(0, 1)
assert sym.generators == Tuple(Permutation(0, 1)(3, 4), Permutation(1, 2)(3, 4))
def test_TensExpr():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
g = Lorentz.metric
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
raises(ValueError, lambda: g(c, d)/g(a, b))
raises(ValueError, lambda: S.One/g(a, b))
raises(ValueError, lambda: (A(c, d) + g(c, d))/g(a, b))
raises(ValueError, lambda: S.One/(A(c, d) + g(c, d)))
raises(ValueError, lambda: A(a, b) + A(a, c))
A(a, b) + B(a, b) # assigned to t for below
#raises(NotImplementedError, lambda: TensExpr.__mul__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__add__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__radd__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__sub__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__rsub__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__div__(t, 'a'))
#raises(NotImplementedError, lambda: TensExpr.__rdiv__(t, 'a'))
with ignore_warnings(SymPyDeprecationWarning):
# DO NOT REMOVE THIS AFTER DEPRECATION REMOVED:
raises(ValueError, lambda: A(a, b)**2)
raises(NotImplementedError, lambda: 2**A(a, b))
raises(NotImplementedError, lambda: abs(A(a, b)))
def test_TensorHead():
# simple example of algebraic expression
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
A = TensorHead('A', [Lorentz]*2)
assert A.name == 'A'
assert A.index_types == [Lorentz, Lorentz]
assert A.rank == 2
assert A.symmetry == TensorSymmetry.no_symmetry(2)
assert A.comm == 0
def test_add1():
assert TensAdd().args == ()
assert TensAdd().doit() == 0
# simple example of algebraic expression
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a,b,d0,d1,i,j,k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
# A, B symmetric
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t1 = A(b, -d0)*B(d0, a)
assert TensAdd(t1).equals(t1)
t2a = B(d0, a) + A(d0, a)
t2 = A(b, -d0)*t2a
assert str(t2) == 'A(b, -L_0)*(A(L_0, a) + B(L_0, a))'
t2 = t2.expand()
assert str(t2) == 'A(b, -L_0)*A(L_0, a) + A(b, -L_0)*B(L_0, a)'
t2 = t2.canon_bp()
assert str(t2) == 'A(a, L_0)*A(b, -L_0) + A(b, L_0)*B(a, -L_0)'
t2b = t2 + t1
assert str(t2b) == 'A(a, L_0)*A(b, -L_0) + A(b, -L_0)*B(L_0, a) + A(b, L_0)*B(a, -L_0)'
t2b = t2b.canon_bp()
assert str(t2b) == '2*A(b, L_0)*B(a, -L_0) + A(a, L_0)*A(b, -L_0)'
p, q, r = tensor_heads('p,q,r', [Lorentz])
t = q(d0)*2
assert str(t) == '2*q(d0)'
t = 2*q(d0)
assert str(t) == '2*q(d0)'
t1 = p(d0) + 2*q(d0)
assert str(t1) == '2*q(d0) + p(d0)'
t2 = p(-d0) + 2*q(-d0)
assert str(t2) == '2*q(-d0) + p(-d0)'
t1 = p(d0)
t3 = t1*t2
assert str(t3) == 'p(L_0)*(2*q(-L_0) + p(-L_0))'
t3 = t3.expand()
assert str(t3) == '2*p(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
t3 = t2*t1
t3 = t3.expand()
assert str(t3) == '2*q(-L_0)*p(L_0) + p(-L_0)*p(L_0)'
t3 = t3.canon_bp()
assert str(t3) == '2*p(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
t1 = p(d0) + 2*q(d0)
t3 = t1*t2
t3 = t3.canon_bp()
assert str(t3) == '4*p(L_0)*q(-L_0) + 4*q(L_0)*q(-L_0) + p(L_0)*p(-L_0)'
t1 = p(d0) - 2*q(d0)
assert str(t1) == '-2*q(d0) + p(d0)'
t2 = p(-d0) + 2*q(-d0)
t3 = t1*t2
t3 = t3.canon_bp()
assert t3 == p(d0)*p(-d0) - 4*q(d0)*q(-d0)
t = p(i)*p(j)*(p(k) + q(k)) + p(i)*(p(j) + q(j))*(p(k) - 3*q(k))
t = t.canon_bp()
assert t == 2*p(i)*p(j)*p(k) - 2*p(i)*p(j)*q(k) + p(i)*p(k)*q(j) - 3*p(i)*q(j)*q(k)
t1 = (p(i) + q(i) + 2*r(i))*(p(j) - q(j))
t2 = (p(j) + q(j) + 2*r(j))*(p(i) - q(i))
t = t1 + t2
t = t.canon_bp()
assert t == 2*p(i)*p(j) + 2*p(i)*r(j) + 2*p(j)*r(i) - 2*q(i)*q(j) - 2*q(i)*r(j) - 2*q(j)*r(i)
t = p(i)*q(j)/2
assert 2*t == p(i)*q(j)
t = (p(i) + q(i))/2
assert 2*t == p(i) + q(i)
t = S.One - p(i)*p(-i)
t = t.canon_bp()
tz1 = t + p(-j)*p(j)
assert tz1 != 1
tz1 = tz1.canon_bp()
assert tz1.equals(1)
t = S.One + p(i)*p(-i)
assert (t - p(-j)*p(j)).canon_bp().equals(1)
t = A(a, b) + B(a, b)
assert t.rank == 2
t1 = t - A(a, b) - B(a, b)
assert t1 == 0
t = 1 - (A(a, -a) + B(a, -a))
t1 = 1 + (A(a, -a) + B(a, -a))
assert (t + t1).expand().equals(2)
t2 = 1 + A(a, -a)
assert t1 != t2
assert t2 != TensMul.from_data(0, [], [], [])
def test_special_eq_ne():
# test special equality cases:
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, d0, d1, i, j, k = tensor_indices('a,b,d0,d1,i,j,k', Lorentz)
# A, B symmetric
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
p, q, r = tensor_heads('p,q,r', [Lorentz])
t = 0*A(a, b)
assert _is_equal(t, 0)
assert _is_equal(t, S.Zero)
assert p(i) != A(a, b)
assert A(a, -a) != A(a, b)
assert 0*(A(a, b) + B(a, b)) == 0
assert 0*(A(a, b) + B(a, b)) is S.Zero
assert 3*(A(a, b) - A(a, b)) is S.Zero
assert p(i) + q(i) != A(a, b)
assert p(i) + q(i) != A(a, b) + B(a, b)
assert p(i) - p(i) == 0
assert p(i) - p(i) is S.Zero
assert _is_equal(A(a, b), A(b, a))
def test_add2():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m, n, p, q = tensor_indices('m,n,p,q', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
A = TensorHead('A', [Lorentz]*3, TensorSymmetry.fully_symmetric(-3))
t1 = 2*R(m, n, p, q) - R(m, q, n, p) + R(m, p, n, q)
t2 = t1*A(-n, -p, -q)
t2 = t2.canon_bp()
assert t2 == 0
t1 = Rational(2, 3)*R(m,n,p,q) - Rational(1, 3)*R(m,q,n,p) + Rational(1, 3)*R(m,p,n,q)
t2 = t1*A(-n, -p, -q)
t2 = t2.canon_bp()
assert t2 == 0
t = A(m, -m, n) + A(n, p, -p)
t = t.canon_bp()
assert t == 0
def test_add3():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i0, i1 = tensor_indices('i0:2', Lorentz)
E, px, py, pz = symbols('E px py pz')
A = TensorHead('A', [Lorentz])
B = TensorHead('B', [Lorentz])
expr1 = A(i0)*A(-i0) - (E**2 - px**2 - py**2 - pz**2)
assert expr1.args == (px**2, py**2, pz**2, -E**2, A(i0)*A(-i0))
expr2 = E**2 - px**2 - py**2 - pz**2 - A(i0)*A(-i0)
assert expr2.args == (E**2, -px**2, -py**2, -pz**2, -A(i0)*A(-i0))
expr3 = A(i0)*A(-i0) - E**2 + px**2 + py**2 + pz**2
assert expr3.args == (px**2, py**2, pz**2, -E**2, A(i0)*A(-i0))
expr4 = B(i1)*B(-i1) + 2*E**2 - 2*px**2 - 2*py**2 - 2*pz**2 - A(i0)*A(-i0)
assert expr4.args == (-2*px**2, -2*py**2, -2*pz**2, 2*E**2, -A(i0)*A(-i0), B(i1)*B(-i1))
def test_mul():
from sympy.abc import x
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
t = TensMul.from_data(S.One, [], [], [])
assert str(t) == '1'
A, B = tensor_heads('A B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = (1 + x)*A(a, b)
assert str(t) == '(x + 1)*A(a, b)'
assert t.index_types == [Lorentz, Lorentz]
assert t.rank == 2
assert t.dum == []
assert t.coeff == 1 + x
assert sorted(t.free) == [(a, 0), (b, 1)]
assert t.components == [A]
ts = A(a, b)
assert str(ts) == 'A(a, b)'
assert ts.index_types == [Lorentz, Lorentz]
assert ts.rank == 2
assert ts.dum == []
assert ts.coeff == 1
assert sorted(ts.free) == [(a, 0), (b, 1)]
assert ts.components == [A]
t = A(-b, a)*B(-a, c)*A(-c, d)
t1 = tensor_mul(*t.split())
assert t == t1
assert tensor_mul(*[]) == TensMul.from_data(S.One, [], [], [])
t = TensMul.from_data(1, [], [], [])
C = TensorHead('C', [])
assert str(C()) == 'C'
assert str(t) == '1'
assert t == 1
raises(ValueError, lambda: A(a, b)*A(a, c))
def test_substitute_indices():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
p = TensorHead('p', [Lorentz])
t = p(i)
t1 = t.substitute_indices((j, k))
assert t1 == t
t1 = t.substitute_indices((i, j))
assert t1 == p(j)
t1 = t.substitute_indices((i, -j))
assert t1 == p(-j)
t1 = t.substitute_indices((-i, j))
assert t1 == p(-j)
t1 = t.substitute_indices((-i, -j))
assert t1 == p(j)
t = A(m, n)
t1 = t.substitute_indices((m, i), (n, -i))
assert t1 == A(n, -n)
t1 = substitute_indices(t, (m, i), (n, -i))
assert t1 == A(n, -n)
t = A(i, k)*B(-k, -j)
t1 = t.substitute_indices((i, j), (j, k))
t1a = A(j, l)*B(-l, -k)
assert t1 == t1a
t1 = substitute_indices(t, (i, j), (j, k))
assert t1 == t1a
t = A(i, j) + B(i, j)
t1 = t.substitute_indices((j, -i))
t1a = A(i, -i) + B(i, -i)
assert t1 == t1a
t1 = substitute_indices(t, (j, -i))
assert t1 == t1a
def test_riemann_cyclic_replace():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m0, m1, m2, m3 = tensor_indices('m:4', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(m0, m2, m1, m3)
t1 = riemann_cyclic_replace(t)
t1a = Rational(-1, 3)*R(m0, m3, m2, m1) + Rational(1, 3)*R(m0, m1, m2, m3) + Rational(2, 3)*R(m0, m2, m1, m3)
assert t1 == t1a
def test_riemann_cyclic():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
i, j, k, l, m, n, p, q = tensor_indices('i,j,k,l,m,n,p,q', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(i,j,k,l) + R(i,l,j,k) + R(i,k,l,j) - \
R(i,j,l,k) - R(i,l,k,j) - R(i,k,j,l)
t2 = t*R(-i,-j,-k,-l)
t3 = riemann_cyclic(t2)
assert t3 == 0
t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l))
t1 = riemann_cyclic(t)
assert t1 == 0
t = R(i,j,k,l)
t1 = riemann_cyclic(t)
assert t1 == Rational(-1, 3)*R(i, l, j, k) + Rational(1, 3)*R(i, k, j, l) + Rational(2, 3)*R(i, j, k, l)
t = R(i,j,k,l)*R(-k,-l,m,n)*(R(-m,-n,-i,-j) + 2*R(-m,-j,-n,-i))
t1 = riemann_cyclic(t)
assert t1 == 0
@XFAIL
def test_div():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
m0, m1, m2, m3 = tensor_indices('m0:4', Lorentz)
R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann())
t = R(m0,m1,-m1,m3)
t1 = t/S(4)
assert str(t1) == '(1/4)*R(m0, L_0, -L_0, m3)'
t = t.canon_bp()
assert not t1._is_canon_bp
t1 = t*4
assert t1._is_canon_bp
t1 = t1/4
assert t1._is_canon_bp
def test_contract_metric1():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p = TensorHead('p', [Lorentz])
t = g(a, b)*p(-b)
t1 = t.contract_metric(g)
assert t1 == p(a)
A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
# case with g with all free indices
t1 = A(a,b)*B(-b,c)*g(d, e)
t2 = t1.contract_metric(g)
assert t1 == t2
# case of g(d, -d)
t1 = A(a,b)*B(-b,c)*g(-d, d)
t2 = t1.contract_metric(g)
assert t2 == D*A(a, d)*B(-d, c)
# g with one free index
t1 = A(a,b)*B(-b,-c)*g(c, d)
t2 = t1.contract_metric(g)
assert t2 == A(a, c)*B(-c, d)
# g with both indices contracted with another tensor
t1 = A(a,b)*B(-b,-c)*g(c, -a)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a, b)*B(-b, -a))
t1 = A(a,b)*B(-b,-c)*g(c, d)*g(-a, -d)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a,b)*B(-b,-a))
t1 = A(a,b)*g(-a,-b)
t2 = t1.contract_metric(g)
assert _is_equal(t2, A(a, -a))
assert not t2.free
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b = tensor_indices('a,b', Lorentz)
g = Lorentz.metric
assert _is_equal(g(a, -a).contract_metric(g), Lorentz.dim) # no dim
def test_contract_metric2():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e, L_0 = tensor_indices('a,b,c,d,e,L_0', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p,q', [Lorentz])
t1 = g(a,b)*p(c)*p(-c)
t2 = 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
assert t == 3*D*p(a)*p(-a)*q(b)*q(-b)
t1 = g(a,b)*p(c)*p(-c)
t2 = 3*q(-a)*q(-b)
t = t1*t2
t = t.contract_metric(g)
t = t.canon_bp()
assert t == 3*p(a)*p(-a)*q(b)*q(-b)
t1 = 2*g(a,b)*p(c)*p(-c)
t2 = - 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
t = 6*g(a,b)*g(-a,-b)*p(c)*p(-c)*q(d)*q(-d)
t = t.contract_metric(g)
t1 = 2*g(a,b)*p(c)*p(-c)
t2 = q(-a)*q(-b) + 3*g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
assert t == (2 + 6*D)*p(a)*p(-a)*q(b)*q(-b)
t1 = p(a)*p(b) + p(a)*q(b) + 2*g(a,b)*p(c)*p(-c)
t2 = q(-a)*q(-b) - g(-a,-b)*q(c)*q(-c)
t = t1*t2
t = t.contract_metric(g)
t1 = (1 - 2*D)*p(a)*p(-a)*q(b)*q(-b) + p(a)*q(-a)*p(b)*q(-b)
assert canon_bp(t - t1) == 0
t = g(a,b)*g(c,d)*g(-b,-c)
t1 = t.contract_metric(g)
assert t1 == g(a, d)
t1 = g(a,b)*g(c,d) + g(a,c)*g(b,d) + g(a,d)*g(b,c)
t2 = t1.substitute_indices((a,-a),(b,-b),(c,-c),(d,-d))
t = t1*t2
t = t.contract_metric(g)
assert t.equals(3*D**2 + 6*D)
t = 2*p(a)*g(b,-b)
t1 = t.contract_metric(g)
assert t1.equals(2*D*p(a))
t = 2*p(a)*g(b,-a)
t1 = t.contract_metric(g)
assert t1 == 2*p(b)
M = Symbol('M')
t = (p(a)*p(b) + g(a, b)*M**2)*g(-a, -b) - D*M**2
t1 = t.contract_metric(g)
assert t1 == p(a)*p(-a)
A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
t = A(a, b)*p(L_0)*g(-a, -b)
t1 = t.contract_metric(g)
assert str(t1) == 'A(L_1, -L_1)*p(L_0)' or str(t1) == 'A(-L_1, L_1)*p(L_0)'
def test_metric_contract3():
D = Symbol('D')
Spinor = TensorIndexType('Spinor', dim=D, metric_symmetry=-1, dummy_name='S')
a0, a1, a2, a3, a4 = tensor_indices('a0:5', Spinor)
C = Spinor.metric
chi, psi = tensor_heads('chi,psi', [Spinor], TensorSymmetry.no_symmetry(1), 1)
B = TensorHead('B', [Spinor]*2, TensorSymmetry.no_symmetry(2))
t = C(a0,-a0)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(-a0,a0)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a0,a1)*C(-a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a1,a0)*C(-a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(-a0,a1)*C(a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(-D)
t = C(a1,-a0)*C(a0,-a1)
t1 = t.contract_metric(C)
assert t1.equals(D)
t = C(a0,a1)*B(-a1,-a0)
t1 = t.contract_metric(C)
t1 = t1.canon_bp()
assert _is_equal(t1, B(a0,-a0))
t = C(a1,a0)*B(-a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(a0,-a1)*B(a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(-a0,a1)*B(-a1,a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(a0,-a0))
t = C(-a0,-a1)*B(a1,a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(a0,-a0))
t = C(-a1, a0)*B(a1,-a0)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(a0,-a0))
t = C(a0,a1)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, psi(a0))
t = C(a1,a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -psi(a0))
t = C(a0,a1)*chi(-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(a1)*psi(-a1))
t = C(a1,a0)*chi(-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(a1)*psi(-a1))
t = C(-a1,a0)*chi(-a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(-a1)*psi(a1))
t = C(a0,-a1)*chi(-a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(-a1)*psi(a1))
t = C(-a0,-a1)*chi(a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, chi(-a1)*psi(a1))
t = C(-a1,-a0)*chi(a0)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -chi(-a1)*psi(a1))
t = C(-a1,-a0)*B(a0,a2)*psi(a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, -B(-a1,a2)*psi(a1))
t = C(a1,a0)*B(-a2,-a0)*psi(-a1)
t1 = t.contract_metric(C)
assert _is_equal(t1, B(-a2,a1)*psi(-a1))
def test_epsilon():
Lorentz = TensorIndexType('Lorentz', dim=4, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
epsilon = Lorentz.epsilon
p, q, r, s = tensor_heads('p,q,r,s', [Lorentz])
t = epsilon(b,a,c,d)
t1 = t.canon_bp()
assert t1 == -epsilon(a,b,c,d)
t = epsilon(c,b,d,a)
t1 = t.canon_bp()
assert t1 == epsilon(a,b,c,d)
t = epsilon(c,a,d,b)
t1 = t.canon_bp()
assert t1 == -epsilon(a,b,c,d)
t = epsilon(a,b,c,d)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == epsilon(c,d,a,b)*p(-a)*q(-b)
t = epsilon(c,b,d,a)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == epsilon(c,d,a,b)*p(-a)*q(-b)
t = epsilon(c,a,d,b)*p(-a)*q(-b)
t1 = t.canon_bp()
assert t1 == -epsilon(c,d,a,b)*p(-a)*q(-b)
t = epsilon(c,a,d,b)*p(-a)*p(-b)
t1 = t.canon_bp()
assert t1 == 0
t = epsilon(c,a,d,b)*p(-a)*q(-b) + epsilon(a,b,c,d)*p(-b)*q(-a)
t1 = t.canon_bp()
assert t1 == -2*epsilon(c,d,a,b)*p(-a)*q(-b)
    # Test that epsilon can be created with a SymPy integer:
Lorentz = TensorIndexType('Lorentz', dim=Integer(4), dummy_name='L')
epsilon = Lorentz.epsilon
assert isinstance(epsilon, TensorHead)
def test_contract_delta1():
# see Group Theory by Cvitanovic page 9
n = Symbol('n')
Color = TensorIndexType('Color', dim=n, dummy_name='C')
a, b, c, d, e, f = tensor_indices('a,b,c,d,e,f', Color)
delta = Color.delta
def idn(a, b, d, c):
assert a.is_up and d.is_up
assert not (b.is_up or c.is_up)
return delta(a,c)*delta(d,b)
def T(a, b, d, c):
assert a.is_up and d.is_up
assert not (b.is_up or c.is_up)
return delta(a,b)*delta(d,c)
def P1(a, b, c, d):
return idn(a,b,c,d) - 1/n*T(a,b,c,d)
def P2(a, b, c, d):
return 1/n*T(a,b,c,d)
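    # The assertions below check that P1 and P2 are idempotent, that they are
    # mutually orthogonal, and that the trace of P1 equals n**2 - 1.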
t = P1(a, -b, e, -f)*P1(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert canon_bp(t1 - P1(a, -b, d, -c)) == 0
t = P2(a, -b, e, -f)*P2(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert t1 == P2(a, -b, d, -c)
t = P1(a, -b, e, -f)*P2(f, -e, d, -c)
t1 = t.contract_delta(delta)
assert t1 == 0
t = P1(a, -b, b, -a)
t1 = t.contract_delta(delta)
assert t1.equals(n**2 - 1)
@filter_warnings_decorator
def test_fun():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p q', [Lorentz])
t = q(c)*p(a)*q(b) + g(a,b)*g(c,d)*q(-d)
assert t(a,b,c) == t
assert canon_bp(t - t(b,a,c) - q(c)*p(a)*q(b) + q(c)*p(b)*q(a)) == 0
assert t(b,c,d) == q(d)*p(b)*q(c) + g(b,c)*g(d,e)*q(-e)
t1 = t.substitute_indices((a,b),(b,a))
assert canon_bp(t1 - q(c)*p(b)*q(a) - g(a,b)*g(c,d)*q(-d)) == 0
# check that g_{a b; c} = 0
# example taken from L. Brewin
# "A brief introduction to Cadabra" arxiv:0903.2085
# dg_{a b c} = \partial_{a} g_{b c} is symmetric in b, c
dg = TensorHead('dg', [Lorentz]*3, TensorSymmetry.direct_product(1, 2))
# gamma^a_{b c} is the Christoffel symbol
gamma = S.Half*g(a,d)*(dg(-b,-d,-c) + dg(-c,-b,-d) - dg(-d,-b,-c))
# t = g_{a b; c}
t = dg(-c,-a,-b) - g(-a,-d)*gamma(d,-b,-c) - g(-b,-d)*gamma(d,-a,-c)
t = t.contract_metric(g)
assert t == 0
t = q(c)*p(a)*q(b)
assert t(b,c,d) == q(d)*p(b)*q(c)
def test_TensorManager():
Lorentz = TensorIndexType('Lorentz', dummy_name='L')
LorentzH = TensorIndexType('LorentzH', dummy_name='LH')
i, j = tensor_indices('i,j', Lorentz)
ih, jh = tensor_indices('ih,jh', LorentzH)
p, q = tensor_heads('p q', [Lorentz])
ph, qh = tensor_heads('ph qh', [LorentzH])
Gsymbol = Symbol('Gsymbol')
GHsymbol = Symbol('GHsymbol')
TensorManager.set_comm(Gsymbol, GHsymbol, 0)
G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), Gsymbol)
assert TensorManager._comm_i2symbol[G.comm] == Gsymbol
GH = TensorHead('GH', [LorentzH], TensorSymmetry.no_symmetry(1), GHsymbol)
ps = G(i)*p(-i)
psh = GH(ih)*ph(-ih)
t = ps + psh
t1 = t*t
assert canon_bp(t1 - ps*ps - 2*ps*psh - psh*psh) == 0
qs = G(i)*q(-i)
qsh = GH(ih)*qh(-ih)
assert _is_equal(ps*qsh, qsh*ps)
assert not _is_equal(ps*qs, qs*ps)
n = TensorManager.comm_symbols2i(Gsymbol)
assert TensorManager.comm_i2symbol(n) == Gsymbol
assert GHsymbol in TensorManager._comm_symbols2i
raises(ValueError, lambda: TensorManager.set_comm(GHsymbol, 1, 2))
TensorManager.set_comms((Gsymbol,GHsymbol,0),(Gsymbol,1,1))
assert TensorManager.get_comm(n, 1) == TensorManager.get_comm(1, n) == 1
TensorManager.clear()
assert TensorManager.comm == [{0:0, 1:0, 2:0}, {0:0, 1:1, 2:None}, {0:0, 1:None}]
assert GHsymbol not in TensorManager._comm_symbols2i
nh = TensorManager.comm_symbols2i(GHsymbol)
assert TensorManager.comm_i2symbol(nh) == GHsymbol
assert GHsymbol in TensorManager._comm_symbols2i
def test_hash():
D = Symbol('D')
Lorentz = TensorIndexType('Lorentz', dim=D, dummy_name='L')
a, b, c, d, e = tensor_indices('a,b,c,d,e', Lorentz)
g = Lorentz.metric
p, q = tensor_heads('p q', [Lorentz])
p_type = p.args[1]
t1 = p(a)*q(b)
t2 = p(a)*p(b)
assert hash(t1) != hash(t2)
t3 = p(a)*p(b) + g(a,b)
t4 = p(a)*p(b) - g(a,b)
assert hash(t3) != hash(t4)
assert a.func(*a.args) == a
assert Lorentz.func(*Lorentz.args) == Lorentz
assert g.func(*g.args) == g
assert p.func(*p.args) == p
assert p_type.func(*p_type.args) == p_type
assert p(a).func(*(p(a)).args) == p(a)
assert t1.func(*t1.args) == t1
assert t2.func(*t2.args) == t2
assert t3.func(*t3.args) == t3
assert t4.func(*t4.args) == t4
assert hash(a.func(*a.args)) == hash(a)
assert hash(Lorentz.func(*Lorentz.args)) == hash(Lorentz)
assert hash(g.func(*g.args)) == hash(g)
assert hash(p.func(*p.args)) == hash(p)
assert hash(p_type.func(*p_type.args)) == hash(p_type)
assert hash(p(a).func(*(p(a)).args)) == hash(p(a))
assert hash(t1.func(*t1.args)) == hash(t1)
assert hash(t2.func(*t2.args)) == hash(t2)
assert hash(t3.func(*t3.args)) == hash(t3)
assert hash(t4.func(*t4.args)) == hash(t4)
def check_all(obj):
return all([isinstance(_, Basic) for _ in obj.args])
assert check_all(a)
assert check_all(Lorentz)
assert check_all(g)
assert check_all(p)
assert check_all(p_type)
assert check_all(p(a))
assert check_all(t1)
assert check_all(t2)
assert check_all(t3)
assert check_all(t4)
tsymmetry = TensorSymmetry.direct_product(-2, 1, 3)
assert tsymmetry.func(*tsymmetry.args) == tsymmetry
assert hash(tsymmetry.func(*tsymmetry.args)) == hash(tsymmetry)
assert check_all(tsymmetry)
### TEST VALUED TENSORS ###
def _get_valued_base_test_variables():
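    """Build the index types, metrics (Minkowski, a sign-flipped diagonal one
    and a non-diagonal one) and the data-carrying tensor heads shared by the
    valued-tensor tests below."""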
minkowski = Matrix((
(1, 0, 0, 0),
(0, -1, 0, 0),
(0, 0, -1, 0),
(0, 0, 0, -1),
))
Lorentz = TensorIndexType('Lorentz', dim=4)
Lorentz.data = minkowski
i0, i1, i2, i3, i4 = tensor_indices('i0:5', Lorentz)
E, px, py, pz = symbols('E px py pz')
A = TensorHead('A', [Lorentz])
A.data = [E, px, py, pz]
B = TensorHead('B', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm')
B.data = range(4)
AB = TensorHead("AB", [Lorentz]*2)
AB.data = minkowski
ba_matrix = Matrix((
(1, 2, 3, 4),
(5, 6, 7, 8),
(9, 0, -1, -2),
(-3, -4, -5, -6),
))
BA = TensorHead("BA", [Lorentz]*2)
BA.data = ba_matrix
# Let's test the diagonal metric, with inverted Minkowski metric:
LorentzD = TensorIndexType('LorentzD')
LorentzD.data = [-1, 1, 1, 1]
mu0, mu1, mu2 = tensor_indices('mu0:3', LorentzD)
C = TensorHead('C', [LorentzD])
C.data = [E, px, py, pz]
### non-diagonal metric ###
ndm_matrix = (
(1, 1, 0,),
(1, 0, 1),
(0, 1, 0,),
)
ndm = TensorIndexType("ndm")
ndm.data = ndm_matrix
n0, n1, n2 = tensor_indices('n0:3', ndm)
NA = TensorHead('NA', [ndm])
NA.data = range(10, 13)
NB = TensorHead('NB', [ndm]*2)
NB.data = [[i+j for j in range(10, 13)] for i in range(10, 13)]
NC = TensorHead('NC', [ndm]*3)
NC.data = [[[i+j+k for k in range(4, 7)] for j in range(1, 4)] for i in range(2, 5)]
return (A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4)
@filter_warnings_decorator
def test_valued_tensor_iter():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
list_BA = [Array([1, 2, 3, 4]), Array([5, 6, 7, 8]), Array([9, 0, -1, -2]), Array([-3, -4, -5, -6])]
# iteration on VTensorHead
assert list(A) == [E, px, py, pz]
assert list(ba_matrix) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, -1, -2, -3, -4, -5, -6]
assert list(BA) == list_BA
# iteration on VTensMul
assert list(A(i1)) == [E, px, py, pz]
assert list(BA(i1, i2)) == list_BA
assert list(3 * BA(i1, i2)) == [3 * i for i in list_BA]
assert list(-5 * BA(i1, i2)) == [-5 * i for i in list_BA]
# iteration on VTensAdd
# A(i1) + A(i1)
assert list(A(i1) + A(i1)) == [2*E, 2*px, 2*py, 2*pz]
assert BA(i1, i2) - BA(i1, i2) == 0
assert list(BA(i1, i2) - 2 * BA(i1, i2)) == [-i for i in list_BA]
@filter_warnings_decorator
def test_valued_tensor_covariant_contravariant_elements():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert A(-i0)[0] == A(i0)[0]
assert A(-i0)[1] == -A(i0)[1]
assert AB(i0, i1)[1, 1] == -1
assert AB(i0, -i1)[1, 1] == 1
assert AB(-i0, -i1)[1, 1] == -1
assert AB(-i0, i1)[1, 1] == 1
@filter_warnings_decorator
def test_valued_tensor_get_matrix():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
matab = AB(i0, i1).get_matrix()
assert matab == Matrix([
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1],
])
# when alternating contravariant/covariant with [1, -1, -1, -1] metric
# it becomes the identity matrix:
assert AB(i0, -i1).get_matrix() == eye(4)
# covariant and contravariant forms:
assert A(i0).get_matrix() == Matrix([E, px, py, pz])
assert A(-i0).get_matrix() == Matrix([E, -px, -py, -pz])
@filter_warnings_decorator
def test_valued_tensor_contraction():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert (A(i0) * A(-i0)).data == E ** 2 - px ** 2 - py ** 2 - pz ** 2
assert (A(i0) * A(-i0)).data == A ** 2
assert (A(i0) * A(-i0)).data == A(i0) ** 2
assert (A(i0) * B(-i0)).data == -px - 2 * py - 3 * pz
for i in range(4):
for j in range(4):
assert (A(i0) * B(-i1))[i, j] == [E, px, py, pz][i] * [0, -1, -2, -3][j]
# test contraction on the alternative Minkowski metric: [-1, 1, 1, 1]
assert (C(mu0) * C(-mu0)).data == -E ** 2 + px ** 2 + py ** 2 + pz ** 2
contrexp = A(i0) * AB(i1, -i0)
assert A(i0).rank == 1
assert AB(i1, -i0).rank == 2
assert contrexp.rank == 1
for i in range(4):
assert contrexp[i] == [E, px, py, pz][i]
@filter_warnings_decorator
def test_valued_tensor_self_contraction():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert AB(i0, -i0).data == 4
assert BA(i0, -i0).data == 2
@filter_warnings_decorator
def test_valued_tensor_pow():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
assert C**2 == -E**2 + px**2 + py**2 + pz**2
assert C**1 == sqrt(-E**2 + px**2 + py**2 + pz**2)
assert C(mu0)**2 == C**2
assert C(mu0)**1 == C**1
@filter_warnings_decorator
def test_valued_tensor_expressions():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
x1, x2, x3 = symbols('x1:4')
# test coefficient in contraction:
rank2coeff = x1 * A(i3) * B(i2)
assert rank2coeff[1, 1] == x1 * px
assert rank2coeff[3, 3] == 3 * pz * x1
coeff_expr = ((x1 * A(i4)) * (B(-i4) / x2)).data
assert coeff_expr.expand() == -px*x1/x2 - 2*py*x1/x2 - 3*pz*x1/x2
add_expr = A(i0) + B(i0)
assert add_expr[0] == E
assert add_expr[1] == px + 1
assert add_expr[2] == py + 2
assert add_expr[3] == pz + 3
sub_expr = A(i0) - B(i0)
assert sub_expr[0] == E
assert sub_expr[1] == px - 1
assert sub_expr[2] == py - 2
assert sub_expr[3] == pz - 3
assert (add_expr * B(-i0)).data == -px - 2*py - 3*pz - 14
expr1 = x1*A(i0) + x2*B(i0)
expr2 = expr1 * B(i1) * (-4)
expr3 = expr2 + 3*x3*AB(i0, i1)
expr4 = expr3 / 2
assert expr4 * 2 == expr3
expr5 = (expr4 * BA(-i1, -i0))
assert expr5.data.expand() == 28*E*x1 + 12*px*x1 + 20*py*x1 + 28*pz*x1 + 136*x2 + 3*x3
@filter_warnings_decorator
def test_valued_tensor_add_scalar():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
# one scalar summand after the contracted tensor
expr1 = A(i0)*A(-i0) - (E**2 - px**2 - py**2 - pz**2)
assert expr1.data == 0
# multiple scalar summands in front of the contracted tensor
expr2 = E**2 - px**2 - py**2 - pz**2 - A(i0)*A(-i0)
assert expr2.data == 0
# multiple scalar summands after the contracted tensor
expr3 = A(i0)*A(-i0) - E**2 + px**2 + py**2 + pz**2
assert expr3.data == 0
# multiple scalar summands and multiple tensors
expr4 = C(mu0)*C(-mu0) + 2*E**2 - 2*px**2 - 2*py**2 - 2*pz**2 - A(i0)*A(-i0)
assert expr4.data == 0
@filter_warnings_decorator
def test_noncommuting_components():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
euclid = TensorIndexType('Euclidean')
euclid.data = [1, 1]
i1, i2, i3 = tensor_indices('i1:4', euclid)
a, b, c, d = symbols('a b c d', commutative=False)
V1 = TensorHead('V1', [euclid]*2)
V1.data = [[a, b], (c, d)]
V2 = TensorHead('V2', [euclid]*2)
V2.data = [[a, c], [b, d]]
vtp = V1(i1, i2) * V2(-i2, -i1)
assert vtp.data == a**2 + b**2 + c**2 + d**2
assert vtp.data != a**2 + 2*b*c + d**2
vtp2 = V1(i1, i2)*V1(-i2, -i1)
assert vtp2.data == a**2 + b*c + c*b + d**2
assert vtp2.data != a**2 + 2*b*c + d**2
Vc = (b * V1(i1, -i1)).data
assert Vc.expand() == b * a + b * d
@filter_warnings_decorator
def test_valued_non_diagonal_metric():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
mmatrix = Matrix(ndm_matrix)
assert (NA(n0)*NA(-n0)).data == (NA(n0).get_matrix().T * mmatrix * NA(n0).get_matrix())[0, 0]
@filter_warnings_decorator
def test_valued_assign_numpy_ndarray():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
# this is needed to make sure that a numpy.ndarray can be assigned to a
# tensor.
arr = [E+1, px-1, py, pz]
A.data = Array(arr)
for i in range(4):
assert A(i0).data[i] == arr[i]
qx, qy, qz = symbols('qx qy qz')
A(-i0).data = Array([E, qx, qy, qz])
for i in range(4):
assert A(i0).data[i] == [E, -qx, -qy, -qz][i]
assert A.data[i] == [E, -qx, -qy, -qz][i]
# test on multi-indexed tensors.
random_4x4_data = [[(i**3-3*i**2)%(j+7) for i in range(4)] for j in range(4)]
AB(-i0, -i1).data = random_4x4_data
for i in range(4):
for j in range(4):
assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]
AB(-i0, i1).data = random_4x4_data
for i in range(4):
for j in range(4):
assert AB(i0, i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)
assert AB(-i0, i1).data[i, j] == random_4x4_data[i][j]
assert AB(i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if i else 1)*(-1 if j else 1)
assert AB(-i0, -i1).data[i, j] == random_4x4_data[i][j]*(-1 if j else 1)
@filter_warnings_decorator
def test_valued_metric_inverse():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
# let's assign some fancy matrix, just to verify it:
# (this has no physical sense, it's just testing sympy);
# it is symmetrical:
md = [[2, 2, 2, 1], [2, 3, 1, 0], [2, 1, 2, 3], [1, 0, 3, 2]]
Lorentz.data = md
m = Matrix(md)
metric = Lorentz.metric
minv = m.inv()
meye = eye(4)
# the Kronecker Delta:
KD = Lorentz.get_kronecker_delta()
for i in range(4):
for j in range(4):
assert metric(i0, i1).data[i, j] == m[i, j]
assert metric(-i0, -i1).data[i, j] == minv[i, j]
assert metric(i0, -i1).data[i, j] == meye[i, j]
assert metric(-i0, i1).data[i, j] == meye[i, j]
assert metric(i0, i1)[i, j] == m[i, j]
assert metric(-i0, -i1)[i, j] == minv[i, j]
assert metric(i0, -i1)[i, j] == meye[i, j]
assert metric(-i0, i1)[i, j] == meye[i, j]
assert KD(i0, -i1)[i, j] == meye[i, j]
@filter_warnings_decorator
def test_valued_canon_bp_swapaxes():
(A, B, AB, BA, C, Lorentz, E, px, py, pz, LorentzD, mu0, mu1, mu2, ndm, n0, n1,
n2, NA, NB, NC, minkowski, ba_matrix, ndm_matrix, i0, i1, i2, i3, i4) = _get_valued_base_test_variables()
e1 = A(i1)*A(i0)
e2 = e1.canon_bp()
assert e2 == A(i0)*A(i1)
for i in range(4):
for j in range(4):
assert e1[i, j] == e2[j, i]
o1 = B(i2)*A(i1)*B(i0)
o2 = o1.canon_bp()
for i in range(4):
for j in range(4):
for k in range(4):
assert o1[i, j, k] == o2[j, i, k]
@filter_warnings_decorator
def test_valued_components_with_wrong_symmetry():
IT = TensorIndexType('IT', dim=3)
i0, i1, i2, i3 = tensor_indices('i0:4', IT)
IT.data = [1, 1, 1]
A_nosym = TensorHead('A', [IT]*2)
A_sym = TensorHead('A', [IT]*2, TensorSymmetry.fully_symmetric(2))
A_antisym = TensorHead('A', [IT]*2, TensorSymmetry.fully_symmetric(-2))
mat_nosym = Matrix([[1,2,3],[4,5,6],[7,8,9]])
mat_sym = mat_nosym + mat_nosym.T
mat_antisym = mat_nosym - mat_nosym.T
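    # A head declared without symmetry accepts any of these matrices; the
    # symmetric / antisymmetric heads below must reject mismatched data.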
A_nosym.data = mat_nosym
A_nosym.data = mat_sym
A_nosym.data = mat_antisym
def assign(A, dat):
A.data = dat
A_sym.data = mat_sym
raises(ValueError, lambda: assign(A_sym, mat_nosym))
raises(ValueError, lambda: assign(A_sym, mat_antisym))
A_antisym.data = mat_antisym
raises(ValueError, lambda: assign(A_antisym, mat_sym))
raises(ValueError, lambda: assign(A_antisym, mat_nosym))
A_sym.data = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
A_antisym.data = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
@filter_warnings_decorator
def test_issue_10972_TensMul_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='i', dim=2)
Lorentz.data = [-1, 1]
mu, nu, alpha, beta = tensor_indices('\\mu, \\nu, \\alpha, \\beta',
Lorentz)
u = TensorHead('u', [Lorentz])
u.data = [1, 0]
F = TensorHead('F', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
F.data = [[0, 1],
[-1, 0]]
mul_1 = F(mu, alpha) * u(-alpha) * F(nu, beta) * u(-beta)
assert (mul_1.data == Array([[0, 0], [0, 1]]))
mul_2 = F(mu, alpha) * F(nu, beta) * u(-alpha) * u(-beta)
assert (mul_2.data == mul_1.data)
assert ((mul_1 + mul_1).data == 2 * mul_1.data)
@filter_warnings_decorator
def test_TensMul_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='L', dim=4)
Lorentz.data = [-1, 1, 1, 1]
mu, nu, alpha, beta = tensor_indices('\\mu, \\nu, \\alpha, \\beta',
Lorentz)
u = TensorHead('u', [Lorentz])
u.data = [1, 0, 0, 0]
F = TensorHead('F', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z')
F.data = [
[0, Ex, Ey, Ez],
[-Ex, 0, Bz, -By],
[-Ey, -Bz, 0, Bx],
[-Ez, By, -Bx, 0]]
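    # Contracting the field strength with the observer's four-velocity gives the
    # electric field seen by that observer: E^mu = F^{mu nu} u_nu.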
E = F(mu, nu) * u(-nu)
assert ((E(mu) * E(nu)).data ==
Array([[0, 0, 0, 0],
[0, Ex ** 2, Ex * Ey, Ex * Ez],
[0, Ex * Ey, Ey ** 2, Ey * Ez],
[0, Ex * Ez, Ey * Ez, Ez ** 2]])
)
assert ((E(mu) * E(nu)).canon_bp().data == (E(mu) * E(nu)).data)
assert ((F(mu, alpha) * F(beta, nu) * u(-alpha) * u(-beta)).data ==
- (E(mu) * E(nu)).data
)
assert ((F(alpha, mu) * F(beta, nu) * u(-alpha) * u(-beta)).data ==
(E(mu) * E(nu)).data
)
g = TensorHead('g', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
g.data = Lorentz.data
# tensor 'perp' is orthogonal to vector 'u'
perp = u(mu) * u(nu) + g(mu, nu)
mul_1 = u(-mu) * perp(mu, nu)
assert (mul_1.data == Array([0, 0, 0, 0]))
mul_2 = u(-mu) * perp(mu, alpha) * perp(nu, beta)
assert (mul_2.data == Array.zeros(4, 4, 4))
Fperp = perp(mu, alpha) * perp(nu, beta) * F(-alpha, -beta)
assert (Fperp.data[0, :] == Array([0, 0, 0, 0]))
assert (Fperp.data[:, 0] == Array([0, 0, 0, 0]))
mul_3 = u(-mu) * Fperp(mu, nu)
assert (mul_3.data == Array([0, 0, 0, 0]))
@filter_warnings_decorator
def test_issue_11020_TensAdd_data():
Lorentz = TensorIndexType('Lorentz', metric_symmetry=1, dummy_name='i', dim=2)
Lorentz.data = [-1, 1]
a, b, c, d = tensor_indices('a, b, c, d', Lorentz)
i0, i1 = tensor_indices('i_0:2', Lorentz)
# metric tensor
g = TensorHead('g', [Lorentz]*2, TensorSymmetry.fully_symmetric(2))
g.data = Lorentz.data
u = TensorHead('u', [Lorentz])
u.data = [1, 0]
add_1 = g(b, c) * g(d, i0) * u(-i0) - g(b, c) * u(d)
assert (add_1.data == Array.zeros(2, 2, 2))
# Now let us replace index `d` with `a`:
add_2 = g(b, c) * g(a, i0) * u(-i0) - g(b, c) * u(a)
assert (add_2.data == Array.zeros(2, 2, 2))
# some more tests
# perp is tensor orthogonal to u^\mu
perp = u(a) * u(b) + g(a, b)
mul_1 = u(-a) * perp(a, b)
assert (mul_1.data == Array([0, 0]))
mul_2 = u(-c) * perp(c, a) * perp(d, b)
assert (mul_2.data == Array.zeros(2, 2, 2))
def test_index_iteration():
L = TensorIndexType("Lorentz", dummy_name="L")
i0, i1, i2, i3, i4 = tensor_indices('i0:5', L)
L0 = tensor_indices('L_0', L)
L1 = tensor_indices('L_1', L)
A = TensorHead("A", [L, L])
B = TensorHead("B", [L, L], TensorSymmetry.fully_symmetric(2))
e1 = A(i0,i2)
e2 = A(i0,-i0)
e3 = A(i0,i1)*B(i2,i3)
e4 = A(i0,i1)*B(i2,-i1)
e5 = A(i0,i1)*B(-i0,-i1)
e6 = e1 + e4
assert list(e1._iterate_free_indices) == [(i0, (1, 0)), (i2, (1, 1))]
assert list(e1._iterate_dummy_indices) == []
assert list(e1._iterate_indices) == [(i0, (1, 0)), (i2, (1, 1))]
assert list(e2._iterate_free_indices) == []
assert list(e2._iterate_dummy_indices) == [(L0, (1, 0)), (-L0, (1, 1))]
assert list(e2._iterate_indices) == [(L0, (1, 0)), (-L0, (1, 1))]
assert list(e3._iterate_free_indices) == [(i0, (0, 1, 0)), (i1, (0, 1, 1)), (i2, (1, 1, 0)), (i3, (1, 1, 1))]
assert list(e3._iterate_dummy_indices) == []
assert list(e3._iterate_indices) == [(i0, (0, 1, 0)), (i1, (0, 1, 1)), (i2, (1, 1, 0)), (i3, (1, 1, 1))]
assert list(e4._iterate_free_indices) == [(i0, (0, 1, 0)), (i2, (1, 1, 0))]
assert list(e4._iterate_dummy_indices) == [(L0, (0, 1, 1)), (-L0, (1, 1, 1))]
assert list(e4._iterate_indices) == [(i0, (0, 1, 0)), (L0, (0, 1, 1)), (i2, (1, 1, 0)), (-L0, (1, 1, 1))]
assert list(e5._iterate_free_indices) == []
assert list(e5._iterate_dummy_indices) == [(L0, (0, 1, 0)), (L1, (0, 1, 1)), (-L0, (1, 1, 0)), (-L1, (1, 1, 1))]
assert list(e5._iterate_indices) == [(L0, (0, 1, 0)), (L1, (0, 1, 1)), (-L0, (1, 1, 0)), (-L1, (1, 1, 1))]
assert list(e6._iterate_free_indices) == [(i0, (0, 1, 0)), (i2, (0, 1, 1)), (i0, (1, 0, 1, 0)), (i2, (1, 1, 1, 0))]
assert list(e6._iterate_dummy_indices) == [(L0, (1, 0, 1, 1)), (-L0, (1, 1, 1, 1))]
assert list(e6._iterate_indices) == [(i0, (0, 1, 0)), (i2, (0, 1, 1)), (i0, (1, 0, 1, 0)), (L0, (1, 0, 1, 1)), (i2, (1, 1, 1, 0)), (-L0, (1, 1, 1, 1))]
assert e1.get_indices() == [i0, i2]
assert e1.get_free_indices() == [i0, i2]
assert e2.get_indices() == [L0, -L0]
assert e2.get_free_indices() == []
assert e3.get_indices() == [i0, i1, i2, i3]
assert e3.get_free_indices() == [i0, i1, i2, i3]
assert e4.get_indices() == [i0, L0, i2, -L0]
assert e4.get_free_indices() == [i0, i2]
assert e5.get_indices() == [L0, L1, -L0, -L1]
assert e5.get_free_indices() == []
def test_tensor_expand():
L = TensorIndexType("L")
i, j, k = tensor_indices("i j k", L)
L_0 = TensorIndex("L_0", L)
A, B, C, D = tensor_heads("A B C D", [L])
assert isinstance(Add(A(i), B(i)), TensAdd)
assert isinstance(expand(A(i)+B(i)), TensAdd)
expr = A(i)*(A(-i)+B(-i))
assert expr.args == (A(L_0), A(-L_0) + B(-L_0))
assert expr != A(i)*A(-i) + A(i)*B(-i)
assert expr.expand() == A(i)*A(-i) + A(i)*B(-i)
assert str(expr) == "A(L_0)*(A(-L_0) + B(-L_0))"
expr = A(i)*A(j) + A(i)*B(j)
assert str(expr) == "A(i)*A(j) + A(i)*B(j)"
expr = A(-i)*(A(i)*A(j) + A(i)*B(j)*C(k)*C(-k))
assert expr != A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)*C(k)*C(-k)
assert expr.expand() == A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)*C(k)*C(-k)
assert str(expr) == "A(-L_0)*(A(L_0)*A(j) + A(L_0)*B(j)*C(L_1)*C(-L_1))"
assert str(expr.canon_bp()) == 'A(L_0)*A(-L_0)*B(j)*C(L_1)*C(-L_1) + A(j)*A(L_0)*A(-L_0)'
expr = A(-i)*(2*A(i)*A(j) + A(i)*B(j))
assert expr.expand() == 2*A(-i)*A(i)*A(j) + A(-i)*A(i)*B(j)
expr = 2*A(i)*A(-i)
assert expr.coeff == 2
expr = A(i)*(B(j)*C(k) + C(j)*(A(k) + D(k)))
assert str(expr) == "A(i)*(B(j)*C(k) + C(j)*(A(k) + D(k)))"
assert str(expr.expand()) == "A(i)*B(j)*C(k) + A(i)*C(j)*A(k) + A(i)*C(j)*D(k)"
assert isinstance(TensMul(3), TensMul)
tm = TensMul(3).doit()
assert tm == 3
assert isinstance(tm, Integer)
p1 = B(j)*B(-j) + B(j)*C(-j)
p2 = C(-i)*p1
p3 = A(i)*p2
assert p3.expand() == A(i)*C(-i)*B(j)*B(-j) + A(i)*C(-i)*B(j)*C(-j)
expr = A(i)*(B(-i) + C(-i)*(B(j)*B(-j) + B(j)*C(-j)))
assert expr.expand() == A(i)*B(-i) + A(i)*C(-i)*B(j)*B(-j) + A(i)*C(-i)*B(j)*C(-j)
expr = C(-i)*(B(j)*B(-j) + B(j)*C(-j))
assert expr.expand() == C(-i)*B(j)*B(-j) + C(-i)*B(j)*C(-j)
def test_tensor_alternative_construction():
L = TensorIndexType("L")
i0, i1, i2, i3 = tensor_indices('i0:4', L)
A = TensorHead("A", [L])
x, y = symbols("x y")
assert A(i0) == A(Symbol("i0"))
assert A(-i0) == A(-Symbol("i0"))
raises(TypeError, lambda: A(x+y))
raises(ValueError, lambda: A(2*x))
def test_tensor_replacement():
L = TensorIndexType("L")
L2 = TensorIndexType("L2", dim=2)
i, j, k, l = tensor_indices("i j k l", L)
A, B, C, D = tensor_heads("A B C D", [L])
H = TensorHead("H", [L, L])
K = TensorHead("K", [L]*4)
expr = H(i, j)
repl = {H(i,-j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, j], Array([[1, -2], [3, -4]]))
assert expr.replace_with_arrays(repl) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [i, j]) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [i, -j]) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [-i, j]) == Array([[1, -2], [-3, 4]])
assert expr.replace_with_arrays(repl, [-i, -j]) == Array([[1, 2], [-3, -4]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[1, 3], [-2, -4]])
assert expr.replace_with_arrays(repl, [j, -i]) == Array([[1, -3], [-2, 4]])
assert expr.replace_with_arrays(repl, [-j, i]) == Array([[1, 3], [2, 4]])
assert expr.replace_with_arrays(repl, [-j, -i]) == Array([[1, -3], [2, -4]])
# Test stability of optional parameter 'indices'
assert expr.replace_with_arrays(repl) == Array([[1, -2], [3, -4]])
expr = H(i,j)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, j], Array([[1, 2], [3, 4]]))
assert expr.replace_with_arrays(repl) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [i, j]) == Array([[1, 2], [3, 4]])
assert expr.replace_with_arrays(repl, [i, -j]) == Array([[1, -2], [3, -4]])
assert expr.replace_with_arrays(repl, [-i, j]) == Array([[1, 2], [-3, -4]])
assert expr.replace_with_arrays(repl, [-i, -j]) == Array([[1, -2], [-3, 4]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[1, 3], [2, 4]])
assert expr.replace_with_arrays(repl, [j, -i]) == Array([[1, -3], [2, -4]])
assert expr.replace_with_arrays(repl, [-j, i]) == Array([[1, 3], [-2, -4]])
assert expr.replace_with_arrays(repl, [-j, -i]) == Array([[1, -3], [-2, 4]])
# Not the same indices:
expr = H(i,k)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
assert expr._extract_data(repl) == ([i, k], Array([[1, 2], [3, 4]]))
expr = A(i)*A(-i)
repl = {A(i): [1,2], L: diag(1, -1)}
assert expr._extract_data(repl) == ([], -3)
assert expr.replace_with_arrays(repl, []) == -3
expr = K(i, j, -j, k)*A(-i)*A(-k)
repl = {A(i): [1, 2], K(i,j,k,l): Array([1]*2**4).reshape(2,2,2,2), L: diag(1, -1)}
assert expr._extract_data(repl)
expr = H(j, k)
repl = {H(i,j): [[1,2],[3,4]], L: diag(1, -1)}
raises(ValueError, lambda: expr._extract_data(repl))
expr = A(i)
repl = {B(i): [1, 2]}
raises(ValueError, lambda: expr._extract_data(repl))
expr = A(i)
repl = {A(i): [[1, 2], [3, 4]]}
raises(ValueError, lambda: expr._extract_data(repl))
# TensAdd:
expr = A(k)*H(i, j) + B(k)*H(i, j)
repl = {A(k): [1], B(k): [1], H(i, j): [[1, 2],[3,4]], L:diag(1,1)}
assert expr._extract_data(repl) == ([k, i, j], Array([[[2, 4], [6, 8]]]))
assert expr.replace_with_arrays(repl, [k, i, j]) == Array([[[2, 4], [6, 8]]])
assert expr.replace_with_arrays(repl, [k, j, i]) == Array([[[2, 6], [4, 8]]])
expr = A(k)*A(-k) + 100
repl = {A(k): [2, 3], L: diag(1, 1)}
assert expr.replace_with_arrays(repl, []) == 113
## Symmetrization:
expr = H(i, j) + H(j, i)
repl = {H(i, j): [[1, 2], [3, 4]]}
assert expr._extract_data(repl) == ([i, j], Array([[2, 5], [5, 8]]))
assert expr.replace_with_arrays(repl, [i, j]) == Array([[2, 5], [5, 8]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[2, 5], [5, 8]])
## Anti-symmetrization:
expr = H(i, j) - H(j, i)
repl = {H(i, j): [[1, 2], [3, 4]]}
assert expr.replace_with_arrays(repl, [i, j]) == Array([[0, -1], [1, 0]])
assert expr.replace_with_arrays(repl, [j, i]) == Array([[0, 1], [-1, 0]])
# Tensors with contractions in replacements:
expr = K(i, j, k, -k)
repl = {K(i, j, k, -k): [[1, 2], [3, 4]]}
assert expr._extract_data(repl) == ([i, j], Array([[1, 2], [3, 4]]))
expr = H(i, -i)
repl = {H(i, -i): 42}
assert expr._extract_data(repl) == ([], 42)
# Replace with array, raise exception if indices are not compatible:
expr = A(i)*A(j)
repl = {A(i): [1, 2]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [j]))
# Raise exception if array dimension is not compatible:
expr = A(i)
repl = {A(i): [[1, 2]]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [i]))
# TensorIndexType with dimension, wrong dimension in replacement array:
u1, u2, u3 = tensor_indices("u1:4", L2)
U = TensorHead("U", [L2])
expr = U(u1)*U(-u2)
repl = {U(u1): [[1]]}
raises(ValueError, lambda: expr.replace_with_arrays(repl, [u1, -u2]))
def test_rewrite_tensor_to_Indexed():
L = TensorIndexType("L", dim=4)
A = TensorHead("A", [L]*4)
B = TensorHead("B", [L])
i0, i1, i2, i3 = symbols("i0:4")
L_0, L_1 = symbols("L_0:2")
a1 = A(i0, i1, i2, i3)
assert a1.rewrite(Indexed) == Indexed(Symbol("A"), i0, i1, i2, i3)
a2 = A(i0, -i0, i2, i3)
assert a2.rewrite(Indexed) == Sum(Indexed(Symbol("A"), L_0, L_0, i2, i3), (L_0, 0, 3))
a3 = a2 + A(i2, i3, i0, -i0)
assert a3.rewrite(Indexed) == \
Sum(Indexed(Symbol("A"), L_0, L_0, i2, i3), (L_0, 0, 3)) +\
Sum(Indexed(Symbol("A"), i2, i3, L_0, L_0), (L_0, 0, 3))
b1 = B(-i0)*a1
assert b1.rewrite(Indexed) == Sum(Indexed(Symbol("B"), L_0)*Indexed(Symbol("A"), L_0, i1, i2, i3), (L_0, 0, 3))
b2 = B(-i3)*a2
assert b2.rewrite(Indexed) == Sum(Indexed(Symbol("B"), L_1)*Indexed(Symbol("A"), L_0, L_0, i2, L_1), (L_0, 0, 3), (L_1, 0, 3))
def test_tensorsymmetry():
with warns_deprecated_sympy():
tensorsymmetry([1]*2)
def test_tensorhead():
with warns_deprecated_sympy():
tensorhead('A', [])
def test_TensorType():
with warns_deprecated_sympy():
sym2 = TensorSymmetry.fully_symmetric(2)
Lorentz = TensorIndexType('Lorentz')
S2 = TensorType([Lorentz]*2, sym2)
assert isinstance(S2, TensorType)
| 35.695233 | 171 | 0.55017 |
4a27516fed55d3054a4c4bf1821eb628c1b92edd | 2,405 | py | Python | src/firebolt/db/connection.py | firebolt-db/firebolt-python-sdk | 35154a5ce070e71138d8573ce4c97891e212a939 | [
"Apache-2.0"
] | 4 | 2022-01-29T19:21:52.000Z | 2022-03-25T20:48:14.000Z | src/firebolt/db/connection.py | firebolt-db/firebolt-python-sdk | 35154a5ce070e71138d8573ce4c97891e212a939 | [
"Apache-2.0"
] | 92 | 2021-11-19T18:15:15.000Z | 2022-03-30T11:52:06.000Z | src/firebolt/db/connection.py | firebolt-db/firebolt-python-sdk | 35154a5ce070e71138d8573ce4c97891e212a939 | [
"Apache-2.0"
] | 6 | 2021-11-22T22:04:20.000Z | 2022-02-18T15:30:23.000Z | from __future__ import annotations
from functools import wraps
from types import TracebackType
from typing import Any
from warnings import warn
from readerwriterlock.rwlock import RWLockWrite
from firebolt.async_db.connection import BaseConnection as AsyncBaseConnection
from firebolt.async_db.connection import async_connect_factory
from firebolt.db.cursor import Cursor
from firebolt.utils.exception import ConnectionClosedError
from firebolt.utils.util import AsyncJobThread, async_to_sync
DEFAULT_TIMEOUT_SECONDS: int = 5
class Connection(AsyncBaseConnection):
"""
Firebolt database connection class. Implements PEP-249.
Args:
engine_url: Firebolt database engine REST API url
database: Firebolt database name
username: Firebolt account username
password: Firebolt account password
api_endpoint: Optional. Firebolt API endpoint. Used for authentication.
Note:
        Firebolt currently doesn't support transactions, so the commit and rollback methods
are not implemented.
"""
__slots__ = AsyncBaseConnection.__slots__ + ("_closing_lock", "_async_job_thread")
cursor_class = Cursor
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# Holding this lock for write means that connection is closing itself.
# cursor() should hold this lock for read to read/write state
self._closing_lock = RWLockWrite()
self._async_job_thread = AsyncJobThread()
def cursor(self) -> Cursor:
with self._closing_lock.gen_rlock():
c = super()._cursor(async_job_thread=self._async_job_thread)
assert isinstance(c, Cursor) # typecheck
return c
@wraps(AsyncBaseConnection._aclose)
def close(self) -> None:
with self._closing_lock.gen_wlock():
async_to_sync(self._aclose, self._async_job_thread)()
# Context manager support
def __enter__(self) -> Connection:
if self.closed:
raise ConnectionClosedError("Connection is already closed.")
return self
def __exit__(
self, exc_type: type, exc_val: Exception, exc_tb: TracebackType
) -> None:
self.close()
def __del__(self) -> None:
if not self.closed:
warn(f"Unclosed {self!r}", UserWarning)
connect = async_to_sync(async_connect_factory(Connection))
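# Illustrative usage sketch (parameter names follow the Connection docstring above;
# the exact signature of connect() is defined by async_connect_factory):
#
#     connection = connect(engine_url="...", database="...", username="...", password="...")
#     cursor = connection.cursor()
#     ...
#     connection.close()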
| 32.066667 | 86 | 0.708524 |
4a27519c41792019a8bb0e1fd2709ef021c726b3 | 426 | py | Python | data/scripts/templates/object/intangible/pet/shared_peko_peko_hue.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/intangible/pet/shared_peko_peko_hue.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/intangible/pet/shared_peko_peko_hue.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_peko_peko_hue.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 25.058824 | 67 | 0.713615 |
4a275209c1a77420b2e277e438ceb78b3db7e5ae | 1,730 | py | Python | utils.py | cnguyen10/similarity_classification_tasks | a8ecf253ed1b23bc246ba5f8880a8a18e92f3bff | [
"MIT"
] | null | null | null | utils.py | cnguyen10/similarity_classification_tasks | a8ecf253ed1b23bc246ba5f8880a8a18e92f3bff | [
"MIT"
] | null | null | null | utils.py | cnguyen10/similarity_classification_tasks | a8ecf253ed1b23bc246ba5f8880a8a18e92f3bff | [
"MIT"
] | null | null | null | import torch
import numpy as np
import os
import sys
import pickle
from PIL import Image
import typing
def list_dir(root: str, prefix: bool = False) -> typing.List[str]:
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))]
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root: str, suffix: str, prefix: bool = False) -> typing.List[str]:
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)]
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def expected_log_dirichlet(concentration: torch.Tensor) -> torch.Tensor:
"""Calculate the expected log of a Dirichlet distribution
"""
return torch.digamma(input=concentration) - torch.digamma(input=torch.sum(input=concentration, dim=-1, keepdim=True)) | 39.318182 | 121 | 0.675145 |
4a27524cfec7a5568a1b5cb287a6322330bcea88 | 971 | py | Python | xlsxwriter/test/comparison/test_table22.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_table22.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_table22.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('table22.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
data = [
['apple', 'pie'],
['pine', 'tree'],
]
worksheet.set_column('B:C', 10.288)
worksheet.add_table('B2:C3', {'data': data, 'header_row': False})
workbook.close()
self.assertExcelEqual()
| 23.119048 | 79 | 0.580844 |
4a2754a50ca140a14e986a0719ddb97e42260ecf | 1,562 | py | Python | kanshu/merge_txt_files.py | hustbill/python-crawler | 7d736b9a3c6d031a9a7e37c74ae3bd417534fb06 | [
"MIT"
] | null | null | null | kanshu/merge_txt_files.py | hustbill/python-crawler | 7d736b9a3c6d031a9a7e37c74ae3bd417534fb06 | [
"MIT"
] | null | null | null | kanshu/merge_txt_files.py | hustbill/python-crawler | 7d736b9a3c6d031a9a7e37c74ae3bd417534fb06 | [
"MIT"
] | null | null | null |
import os
import glob
import re
# Ref: https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
def atof(text):
try:
retval = float(text)
except ValueError:
retval = text
return retval
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
float regex comes from https://stackoverflow.com/a/12643073/190597
'''
return [atof(c) for c in re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text)]
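# e.g. sorted(["ch10.txt", "ch2.txt"], key=natural_keys) -> ["ch2.txt", "ch10.txt"]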
def merge_txt_files(txtPath,outputFile):
print("merge_txt_files: ")
read_files = glob.glob(txtPath + "/*.txt")
# sorted_files = sorted(read_files)
# correctly sort a string with a number inside
read_files.sort(key=natural_keys)
# print (read_files[1:20])
# sub_files = read_files[1:20]
sub_files = read_files
with open(outputFile, "wb") as outfile:
for f in sub_files:
with open(f, "rb") as infile:
print(f)
# outfile.write(f.encode('gbk'))
# outfile.write(f)
base = os.path.basename(f)
file_name = os.path.splitext(base)[0]
section_name = ('\n\n ' + file_name + ' \n\n').encode()
outfile.write(section_name)
outfile.write(infile.read())
if __name__ == "__main__":
outputFile = "冷枪-中日两支特种小分队生死对决-吴超著.txt"
merge_txt_files("./txt_files", outputFile) | 31.24 | 102 | 0.612036 |
4a2755168326d2d2e2f9c332581bcb8de167d5df | 1,402 | py | Python | library/books/book_descriptors.py | HoldYourBreath/Library | 776d4e31beb3ce976c23dc423c88839435bf26ae | [
"MIT"
] | 2 | 2017-04-29T07:27:04.000Z | 2017-06-17T14:49:14.000Z | library/books/book_descriptors.py | HoldYourBreath/Library | 776d4e31beb3ce976c23dc423c88839435bf26ae | [
"MIT"
] | 45 | 2017-06-16T13:34:45.000Z | 2022-03-08T22:46:52.000Z | library/books/book_descriptors.py | HoldYourBreath/Library | 776d4e31beb3ce976c23dc423c88839435bf26ae | [
"MIT"
] | null | null | null | import library.database as database
from library.books.book_descriptor import BookDescriptor
class BookDescriptors:
def __init__(self):
self.books = []
def marshal(self):
return [book.marshal() for book in self.books]
@staticmethod
def get(search_params={}):
db = database.get()
sub_query_books = 'SELECT isbn, COUNT(book_id) AS num_copies ' \
'FROM books GROUP BY isbn'
sub_query_authors = 'SELECT isbn, GROUP_CONCAT(name) AS authors ' \
'FROM authors GROUP BY isbn'
query = 'SELECT book_descriptors.*, books.num_copies, ' \
'authors.authors FROM book_descriptors ' \
'LEFT JOIN ({}) books USING (isbn) ' \
'LEFT JOIN ({}) authors USING (isbn)' \
.format(sub_query_books, sub_query_authors)
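        # One row per ISBN: copy counts and a comma-separated author list are
        # joined onto book_descriptors, then the authors are split back into a list below.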
curs = db.execute(query)
books = BookDescriptors()
for book in curs.fetchall():
if not book['isbn']:
# No books found
return books
authors = book['authors']
book = dict(book)
if authors:
book['authors'] = [author for author in authors.split(',')]
else:
book['authors'] = []
book = BookDescriptor(**book)
books.books.append(book)
return books
| 34.195122 | 75 | 0.544936 |
4a2755b80c5c2cdeb9ae85e867ecee13ef8a0c99 | 2,544 | py | Python | resources/migrations/0005_add_map_field.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 3 | 2020-10-13T07:58:48.000Z | 2020-12-22T09:41:50.000Z | resources/migrations/0005_add_map_field.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 422 | 2018-10-25T10:57:05.000Z | 2022-03-30T05:47:14.000Z | resources/migrations/0005_add_map_field.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 1 | 2020-04-03T07:38:03.000Z | 2020-04-03T07:38:03.000Z | # Generated by Django 2.2.6 on 2020-03-06 08:44
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import resources.models
import uuid
class Migration(migrations.Migration):
dependencies = [
("resources", "0004_add_depth_to_berth_type"),
]
operations = [
migrations.CreateModel(
name="WinterStorageAreaMap",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
(
"map_file",
models.FileField(
storage=django.core.files.storage.FileSystemStorage(),
upload_to=None,
verbose_name="map file",
),
),
(
"winter_storage_area",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="maps",
to="resources.WinterStorageArea",
verbose_name="winter storage area",
),
),
],
options={"abstract": False,},
),
migrations.CreateModel(
name="HarborMap",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
(
"map_file",
models.FileField(
storage=django.core.files.storage.FileSystemStorage(),
upload_to=None,
verbose_name="map file",
),
),
(
"harbor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="maps",
to="resources.Harbor",
verbose_name="harbor",
),
),
],
options={"abstract": False,},
),
]
| 31.02439 | 78 | 0.387972 |
4a2755d462f429a54d8d093f181b2e4e0c348950 | 7,440 | py | Python | test/client_test.py | mengibin/nacos-sdk-python | f929d983f62904efdbbda3b5dd148905478bfa08 | [
"Apache-2.0"
] | null | null | null | test/client_test.py | mengibin/nacos-sdk-python | f929d983f62904efdbbda3b5dd148905478bfa08 | [
"Apache-2.0"
] | null | null | null | test/client_test.py | mengibin/nacos-sdk-python | f929d983f62904efdbbda3b5dd148905478bfa08 | [
"Apache-2.0"
] | 1 | 2020-11-05T09:01:30.000Z | 2020-11-05T09:01:30.000Z | # -*- coding: utf8 -*-
from __future__ import print_function
import unittest
import nacos
from nacos import files
import time
import shutil
SERVER_1 = "100.69.207.65"
SERVER_ADDRESSES = "%s:8848, 100.69.207.66:8848" % SERVER_1
NAMESPACE = "6cface1f-2f1b-4744-a59d-fd818b91a799"
# Set the following values if authentication mode is enabled on the server
USERNAME = None
PASSWORD = None
client = nacos.NacosClient(SERVER_ADDRESSES, namespace=NAMESPACE, username=USERNAME, password=PASSWORD)
class TestClient(unittest.TestCase):
def test_get_server(self):
self.assertEqual(client.get_server(), (SERVER_1, 8848))
def test_set_get_remove_config(self):
d = "test"
g = "DEFAULT_GROUP"
content = u"test中文"
self.assertEqual(client.publish_config(d, g, content), True)
time.sleep(0.5)
self.assertEqual(client.get_config(d, g), content)
self.assertEqual(client.remove_config(d, g), True)
time.sleep(0.5)
self.assertEqual(client.get_config(d, g), None)
def test_server_failover(self):
client2 = nacos.NacosClient("100.69.207.66:8848, %s:8848" %SERVER_1, namespace=NAMESPACE, username=USERNAME, password=PASSWORD)
d = "test"
g = "DEFAULT_GROUP"
content = u"test中文"
self.assertEqual(client2.publish_config(d, g, content), True)
time.sleep(0.5)
self.assertEqual(client2.get_config(d, g), content)
self.assertEqual(client2.remove_config(d, g), True)
time.sleep(0.5)
self.assertEqual(client2.get_config(d, g), None)
def test_fake_watcher(self):
d = "test"
g = "DEFAULT_GROUP"
class Share:
content = None
count = 0
cache_key = "+".join([d, g, NAMESPACE])
def test_cb(args):
print(args)
Share.count += 1
Share.content = args["content"]
client.add_config_watcher(d, g, test_cb)
client.add_config_watcher(d, g, test_cb)
client.add_config_watcher(d, g, test_cb)
time.sleep(1)
client.notify_queue.put((cache_key, "xxx", "md51"))
time.sleep(1)
self.assertEqual(Share.content, "xxx")
self.assertEqual(Share.count, 3)
client.remove_config_watcher(d, g, test_cb)
Share.count = 0
client.notify_queue.put((cache_key, "yyy", "md52"))
time.sleep(1)
self.assertEqual(Share.content, "yyy")
self.assertEqual(Share.count, 2)
client.remove_config_watcher(d, g, test_cb, True)
Share.count = 0
client.notify_queue.put((cache_key, "not effective, no watchers", "md53"))
time.sleep(1)
self.assertEqual(Share.content, "yyy")
self.assertEqual(Share.count, 0)
Share.count = 0
client.add_config_watcher(d, g, test_cb)
time.sleep(1)
client.notify_queue.put((cache_key, "zzz", "md54"))
time.sleep(1)
self.assertEqual(Share.content, "zzz")
self.assertEqual(Share.count, 1)
Share.count = 0
client.notify_queue.put((cache_key, "not effective, md5 no changes", "md54"))
time.sleep(1)
self.assertEqual(Share.content, "zzz")
self.assertEqual(Share.count, 0)
def test_long_pulling(self):
client2 = nacos.NacosClient(SERVER_ADDRESSES, username=USERNAME, password=PASSWORD)
d = "test1_pulling"
g = "Group1"
g2 = "Group2"
class Share:
content = None
def cb(x):
Share.content = x["content"]
print(Share.content)
client2.publish_config(d, g, "test2")
client2.publish_config(d, g2, "test2")
time.sleep(0.5)
# test common
client2.add_config_watcher(d, g, cb)
client2.add_config_watcher(d, g2, cb)
time.sleep(0.5)
client2.publish_config(d, g, "test")
client2.publish_config(d, g2, "test")
time.sleep(1)
self.assertEqual(Share.content, "test")
client2.publish_config(d, g2, u"test2中文")
time.sleep(1)
self.assertEqual(Share.content, u"test2中文")
def test_get_from_failover(self):
d = "test_fo"
g = "group"
key = "+".join([d, g, NAMESPACE])
files.save_file(client.failover_base, key, u"xxx中文")
print(client.get_config(d, g))
self.assertEqual(client.get_config(d, g), u"xxx中文")
shutil.rmtree(client.failover_base)
def test_get_from_snapshot(self):
client2 = nacos.NacosClient(SERVER_ADDRESSES, namespace=NAMESPACE, username=USERNAME, password=PASSWORD)
client2.current_server = ("1.100.84.215", 8080)
d = "test_snap"
g = "group"
key = "+".join([d, g, NAMESPACE])
files.save_file(client2.snapshot_base, key, u"yyy中文")
self.assertEqual(client2.get_config(d, g), u"yyy中文")
shutil.rmtree(client2.snapshot_base)
def test_add_naming_instance(self):
self.assertEqual(
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}", False, True), True)
def test_add_naming_instance_with_dict_metadata(self):
self.assertEqual(
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, {"a":"c"}, False, True),
True)
def test_remove_naming_instance(self):
print(client.remove_naming_instance("test.service", "1.0.0.1", 8080))
def test_modify_naming_instance(self):
self.assertEqual(
client.modify_naming_instance("test.service", "1.0.0.1", 8080, cluster_name="testCluster", enable=False,
metadata='{"a":"a"}'), True)
def test_modify_naming_instance_with_dict_metadata(self):
self.assertEqual(
client.modify_naming_instance("test.service", "1.0.0.1", 8080, cluster_name="testCluster", enable=False,
metadata={"a":"b"}), True)
def test_list_naming_instance_offline(self):
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}", False, True)
self.assertEqual(len(client.list_naming_instance("test.service")["hosts"]), 0)
def test_list_naming_instance_online(self):
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}", True, True)
self.assertEqual(len(client.list_naming_instance("test.service")["hosts"]), 1)
def test_get_naming_instance(self):
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}", False, True)
self.assertEqual(client.get_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2")['ip'], u'1.0.0.1')
def test_send_heartbeat(self):
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}", False, True)
self.assertEqual(
client.send_heartbeat("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, "{}")["clientBeatInterval"] > 0,
True)
def test_send_heartbeat_with_dict_metadata(self):
client.add_naming_instance("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, {"a":"c"}, False, True)
self.assertEqual(
client.send_heartbeat("test.service", "1.0.0.1", 8080, "testCluster2", 0.1, {"a":"c"})["clientBeatInterval"] > 0,
True)
if __name__ == '__main__':
unittest.main()
| 37.959184 | 135 | 0.624866 |
4a27563d132b3e77f97bdad802c5d4b9ad436078 | 1,893 | py | Python | modules/tools/navigator/navigator.py | XiaoyuMo92/apollo | ee7bbef1b5afe9caa94bdc8033eed5d0ee3779aa | [
"Apache-2.0"
] | null | null | null | modules/tools/navigator/navigator.py | XiaoyuMo92/apollo | ee7bbef1b5afe9caa94bdc8033eed5d0ee3779aa | [
"Apache-2.0"
] | 2 | 2022-01-27T16:14:38.000Z | 2022-02-11T00:10:28.000Z | modules/tools/navigator/navigator.py | XiaoyuMo92/apollo | ee7bbef1b5afe9caa94bdc8033eed5d0ee3779aa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import rospy
import sys
import json
from modules.map.relative_map.proto import navigation_pb2
if __name__ == '__main__':
fdata = sys.argv[1]
rospy.init_node("navigator3", anonymous=True)
navigation_pub = rospy.Publisher(
"/apollo/navigation",
navigation_pb2.NavigationInfo,
queue_size=1)
navigation_info = navigation_pb2.NavigationInfo()
navigation_path = navigation_info.navigation_path.add()
navigation_path.path_priority = 0
navigation_path.path.name = "navigation"
f = open(fdata, 'r')
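    # Each input line is a JSON object with parallel arrays
    # ('x', 'y', 's', 'theta', 'kappa', 'dkappa') describing one path segment.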
for line in f:
seg = json.loads(line)
for i in range(len(seg['s'])):
point = navigation_path.path.path_point.add()
point.x = seg['x'][i]
point.y = seg['y'][i]
point.s = seg['s'][i]
point.theta = seg['theta'][i]
point.kappa = seg['kappa'][i]
point.dkappa = seg['dkappa'][i]
f.close()
    print(navigation_info)
r = rospy.Rate(1) # 1hz
while not rospy.is_shutdown():
r.sleep()
navigation_pub.publish(navigation_info)
break
| 35.055556 | 79 | 0.611199 |
4a275690686fae92756ab8a3eee6b1b4120cc9c0 | 1,209 | py | Python | river/tree/_attribute_test/nominal_attribute_binary_test.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | [
"BSD-3-Clause"
] | 1 | 2020-12-04T18:56:19.000Z | 2020-12-04T18:56:19.000Z | river/tree/_attribute_test/nominal_attribute_binary_test.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | [
"BSD-3-Clause"
] | null | null | null | river/tree/_attribute_test/nominal_attribute_binary_test.py | brcharron/creme | 25290780f6bba0eb030215194e81b120d0219389 | [
"BSD-3-Clause"
] | null | null | null | from .instance_conditional_test import InstanceConditionalTest
class NominalAttributeBinaryTest(InstanceConditionalTest):
"""Implement binary split tests for categorical features.
The resulting test considers two branches: one encompassing a specific
feature value, and another for the remaining cases.
Parameters
----------
att_idx
The id of the attribute.
att_value
The categorical value of the feature to test.
"""
def __init__(self, att_idx, att_value):
super().__init__()
self._att_idx = att_idx
self._att_value = att_value
def branch_for_instance(self, x):
try:
return 0 if x[self._att_idx] == self._att_value else 1
except KeyError:
return -1
@staticmethod
def max_branches():
return 2
def describe_condition_for_branch(self, branch, shorten=False):
condition = ' = ' if branch == 0 else ' != '
if shorten:
return f"{condition}{self._att_value}"
else:
return f"{self._att_idx}{condition}{self._att_value}"
def attrs_test_depends_on(self):
return [self._att_idx]
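# Example: NominalAttributeBinaryTest("colour", "red") sends samples with
# x["colour"] == "red" to branch 0, any other value to branch 1, and returns -1
# when the feature is missing, e.g.
#     test = NominalAttributeBinaryTest("colour", "red")
#     test.branch_for_instance({"colour": "red"})   # -> 0
#     test.branch_for_instance({"colour": "blue"})  # -> 1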
| 29.487805 | 78 | 0.630273 |
4a275706a79050b9179b731a6d0d84a6e13fbc93 | 2,289 | py | Python | swmmio/__main__.py | jennwuu/swmmio | 6918ecfb69c10333cbc65ce0ab6554f8a04ef8f9 | [
"MIT"
] | 76 | 2016-04-26T14:04:02.000Z | 2022-03-24T10:10:29.000Z | swmmio/__main__.py | Kimi-Monica/swmmio | 54dd6c1f7a3e47db5702b1f703beca0a8945a250 | [
"MIT"
] | 94 | 2016-05-06T15:32:51.000Z | 2022-02-10T08:03:30.000Z | swmmio/__main__.py | Kimi-Monica/swmmio | 54dd6c1f7a3e47db5702b1f703beca0a8945a250 | [
"MIT"
] | 26 | 2016-09-01T22:51:47.000Z | 2022-02-09T09:13:23.000Z | from swmmio.run_models.run import run_simple, run_hot_start_sequence
from swmmio.run_models import start_pool
from swmmio import Model
from itertools import chain
import os
import argparse
from multiprocessing import Pool, cpu_count
from datetime import datetime
#parse the arguments
parser = argparse.ArgumentParser(description='Process some stuff')
parser.add_argument('-r', '--run', dest='model_to_run', nargs="+")
parser.add_argument('-rhs', '--run_hotstart', dest='hotstart_model_to_run', nargs="+")
parser.add_argument('-sp', '--start_pool', dest='start_pool', nargs="+")
parser.add_argument('-cores_left', '--cores_left', dest='cores_left', default=4, type=int)
parser.add_argument('-pp', '--post_process', dest='post_process', nargs="+")
args = parser.parse_args()
wd = os.getcwd() #current directory script is being called from
if args.model_to_run is not None:
models_paths = [os.path.join(wd, f) for f in args.model_to_run]
print('Adding models to queue:\n\t{}'.format('\n\t'.join(models_paths)))
#run the models in series (one after the other)
list(map(run_simple, models_paths))
# run_simple(args.model_to_run)
elif args.hotstart_model_to_run is not None:
models_paths = [os.path.join(wd, f) for f in args.hotstart_model_to_run]
print('hotstart_model_to_run the model: {}'.format(args.hotstart_model_to_run))
# m = Model(args.hotstart_model_to_run)
# run_hot_start_sequence(m)#args.hotstart_model_to_run)
list(map(run_hot_start_sequence, models_paths))
elif args.start_pool is not None:
models_dirs = [os.path.join(wd, f) for f in args.start_pool]
print('Searching for models in:\n\t{}'.format('\n\t'.join(models_dirs)))
#combine the segments and options (combinations) into one iterable
inp_paths = []
for root, dirs, files in chain.from_iterable(os.walk(path) for path in models_dirs):
for f in files:
if f.endswith('.inp') and 'bk' not in root:
#we've found a directory containing an inp
inp_paths.append(os.path.join(root, f))
#call the main() function in start_pool.py
start_pool.main(inp_paths, args.cores_left)
print("swmmio has completed running {} models".format(len(inp_paths)))
else:
print('you need to pass in some args')
| 38.79661 | 90 | 0.719965 |
4a2757df18c3a10db89c859de214132cdd08ef92 | 48 | py | Python | tests/__init__.py | vishalbelsare/drug2ways | 9ddac00c221e4c0c785dcce2e5360624ead1575b | [
"Apache-2.0"
] | 21 | 2020-05-27T21:23:09.000Z | 2022-02-09T13:42:29.000Z | tests/__init__.py | vishalbelsare/drug2ways | 9ddac00c221e4c0c785dcce2e5360624ead1575b | [
"Apache-2.0"
] | 8 | 2020-05-29T20:46:21.000Z | 2021-10-13T13:35:59.000Z | tests/__init__.py | vishalbelsare/drug2ways | 9ddac00c221e4c0c785dcce2e5360624ead1575b | [
"Apache-2.0"
] | 6 | 2020-07-15T14:09:48.000Z | 2022-03-30T12:27:53.000Z | # -*- coding: utf-8 -*-
"""drug2ways tests."""
| 12 | 23 | 0.5 |
4a275945fc5f750571e7af4f4e2d2d0eec387687 | 362 | py | Python | never_saiddit/core/urls.py | Damgaard/Never-Saiddit | d2b0bac0a39da0f21d8a0e5ed46094786615c41f | [
"MIT"
] | null | null | null | never_saiddit/core/urls.py | Damgaard/Never-Saiddit | d2b0bac0a39da0f21d8a0e5ed46094786615c41f | [
"MIT"
] | null | null | null | never_saiddit/core/urls.py | Damgaard/Never-Saiddit | d2b0bac0a39da0f21d8a0e5ed46094786615c41f | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^get_job_status/$', views.get_job_status, name='get_job_status'),
# Main pages
url(r'^confirmation/(?P<pk>[\w-]*)/$', views.ConfirmationView.as_view(), name="confirmation"),
url(r'^destruction/(?P<pk>[\w-]*)/$', views.DestructionView.as_view(), name="destruction"),
]
| 30.166667 | 98 | 0.668508 |
4a275aac0f57032d51c557bd2190c036a2ccaa9c | 4,676 | py | Python | great_expectations/datasource/data_connector/configured_asset_filesystem_data_connector.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 1 | 2021-04-11T20:54:23.000Z | 2021-04-11T20:54:23.000Z | great_expectations/datasource/data_connector/configured_asset_filesystem_data_connector.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 53 | 2021-10-02T02:26:51.000Z | 2021-12-28T20:49:25.000Z | great_expectations/datasource/data_connector/configured_asset_filesystem_data_connector.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 1 | 2022-03-03T16:47:32.000Z | 2022-03-03T16:47:32.000Z | import logging
from pathlib import Path
from typing import List, Optional
from great_expectations.datasource.data_connector.asset import Asset
from great_expectations.datasource.data_connector.configured_asset_file_path_data_connector import (
ConfiguredAssetFilePathDataConnector,
)
from great_expectations.datasource.data_connector.util import (
get_filesystem_one_level_directory_glob_path_list,
normalize_directory_path,
)
from great_expectations.execution_engine import ExecutionEngine
logger = logging.getLogger(__name__)
class ConfiguredAssetFilesystemDataConnector(ConfiguredAssetFilePathDataConnector):
"""
    Extension of ConfiguredAssetFilePathDataConnector used to connect to data on a filesystem.
The ConfiguredAssetFilesystemDataConnector is one of two classes (InferredAssetFilesystemDataConnector being the
other one) designed for connecting to data on a filesystem. It connects to assets
defined by the `assets` configuration.
A ConfiguredAssetFilesystemDataConnector requires an explicit listing of each DataAsset you want to connect to.
This allows more fine-tuning, but also requires more setup.
"""
def __init__(
self,
name: str,
datasource_name: str,
base_directory: str,
assets: dict,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
glob_directive: str = "**/*",
sorters: Optional[list] = None,
batch_spec_passthrough: Optional[dict] = None,
):
"""
Base class for DataConnectors that connect to data on a filesystem. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references. It takes in configured `assets` as a dictionary.
Args:
name (str): name of ConfiguredAssetFilesystemDataConnector
datasource_name (str): Name of datasource that this DataConnector is connected to
assets (dict): configured assets as a dictionary. These can each have their own regex and sorters
execution_engine (ExecutionEngine): ExecutionEngine object to actually read the data
            default_regex (dict): Optional dict to filter and organize the data_references.
glob_directive (str): glob for selecting files in directory (defaults to **/*) or nested directories (e.g. */*/*.csv)
sorters (list): Optional list if you want to sort the data_references
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
"""
logger.debug(f'Constructing ConfiguredAssetFilesystemDataConnector "{name}".')
super().__init__(
name=name,
datasource_name=datasource_name,
assets=assets,
execution_engine=execution_engine,
default_regex=default_regex,
sorters=sorters,
batch_spec_passthrough=batch_spec_passthrough,
)
self._base_directory = base_directory
self._glob_directive = glob_directive
def _get_data_reference_list_for_asset(self, asset: Optional[Asset]) -> List[str]:
base_directory: str = self.base_directory
glob_directive: str = self._glob_directive
if asset is not None:
if asset.base_directory:
base_directory = normalize_directory_path(
dir_path=asset.base_directory, root_directory_path=base_directory
)
if asset.glob_directive:
glob_directive = asset.glob_directive
path_list: List[str] = get_filesystem_one_level_directory_glob_path_list(
base_directory_path=base_directory, glob_directive=glob_directive
)
return sorted(path_list)
def _get_full_file_path_for_asset(
self, path: str, asset: Optional[Asset] = None
) -> str:
base_directory: str = self.base_directory
if asset is not None:
if asset.base_directory:
base_directory = normalize_directory_path(
dir_path=asset.base_directory,
root_directory_path=base_directory,
)
return str(Path(base_directory).joinpath(path))
@property
def base_directory(self) -> str:
"""
Accessor method for base_directory. If directory is a relative path, interpret it as relative to the
root directory. If it is absolute, then keep as-is.
"""
return normalize_directory_path(
dir_path=self._base_directory,
root_directory_path=self.data_context_root_directory,
)
| 41.75 | 130 | 0.69568 |
4a275af21d78e08a2e03aebd9e2c2fa65929698f | 7,368 | py | Python | src/repair/project.py | violetbingzhe/angelix | 511c701a51522c0fceded153a9ec71a3695f8720 | [
"MIT"
] | 89 | 2016-02-19T10:11:47.000Z | 2022-03-30T17:04:31.000Z | src/repair/project.py | violetbingzhe/angelix | 511c701a51522c0fceded153a9ec71a3695f8720 | [
"MIT"
] | 29 | 2016-03-24T12:55:42.000Z | 2022-01-13T21:41:16.000Z | src/repair/project.py | violetbingzhe/angelix | 511c701a51522c0fceded153a9ec71a3695f8720 | [
"MIT"
] | 38 | 2016-02-21T15:39:38.000Z | 2022-03-18T02:33:33.000Z | import copy
import difflib
import os
from os.path import join, exists, relpath, basename, realpath
import shutil
import subprocess
import json
from utils import cd
import logging
import tempfile
import sys
import re
import statistics
import time
from transformation import PrintfTransformer
logger = logging.getLogger(__name__)
class CompilationError(Exception):
pass
class Project:
def __init__(self, config, dir, buggy, build_cmd, configure_cmd):
self.config = config
if self.config['verbose']:
self.subproc_output = sys.stderr
else:
self.subproc_output = subprocess.DEVNULL
self.dir = dir
self.buggy = buggy
self.build_cmd = build_cmd
self.configure_cmd = configure_cmd
def initialize(self):
if self.config['instr_printf'] is not None:
self.configure()
self.instrument_printf = PrintfTransformer(self.config)
self.instrument_printf(self, self.config['instr_printf'])
self._buggy_backup = join(self.dir, self.buggy) + '.backup'
shutil.copyfile(join(self.dir, self.buggy), self._buggy_backup)
def restore_buggy(self):
shutil.copyfile(self._buggy_backup, join(self.dir, self.buggy))
def diff_buggy(self):
with open(join(self.dir, self.buggy), encoding='latin-1') as buggy:
buggy_lines = buggy.readlines()
with open(self._buggy_backup, encoding='latin-1') as backup:
backup_lines = backup.readlines()
return difflib.unified_diff(backup_lines, buggy_lines,
fromfile=join('a', self.buggy),
tofile=join('b', self.buggy))
def import_compilation_db(self, compilation_db):
compilation_db = copy.deepcopy(compilation_db)
for item in compilation_db:
item['directory'] = join(self.dir, item['directory'])
item['file'] = join(self.dir, item['file'])
            # this is a temporary hack; in the general case, we (probably) need a different workflow:
wrong_dir = realpath(join(self.dir, '..', 'validation'))
item['command'] = item['command'].replace(wrong_dir, self.dir)
item['command'] = item['command'] + ' -I' + os.environ['LLVM3_INCLUDE_PATH']
            # this is a hack to skip output expressions when performing the transformation:
item['command'] = item['command'] + ' -include ' + os.environ['ANGELIX_RUNTIME_H']
item['command'] = item['command'] + ' -D ANGELIX_INSTRUMENTATION'
compilation_db_file = join(self.dir, 'compile_commands.json')
with open(compilation_db_file, 'w') as file:
json.dump(compilation_db, file, indent=2)
def configure(self):
compile_start_time = time.time()
src = basename(self.dir)
logger.info('configuring {} source'.format(src))
if self.configure_cmd is None:
return
with cd(self.dir):
return_code = subprocess.call(self.configure_cmd,
shell=True,
stderr=self.subproc_output,
stdout=self.subproc_output)
if return_code != 0 and not self.config['mute_warning']:
logger.warning("configuration of {} returned non-zero code".format(relpath(dir)))
compile_end_time = time.time()
compile_elapsed = compile_end_time - compile_start_time
statistics.data['time']['compilation'] += compile_elapsed
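# Build helpers shared by the concrete project types below: run the build command
# inside the project directory with ANGELIX_COMPILER_MESSAGES pointing at a scratch
# file, then log any source files the angelix compiler wrapper failed to build.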
def build_in_env(dir, cmd, subproc_output, config, env=os.environ):
dirpath = tempfile.mkdtemp()
messages = join(dirpath, 'messages')
environment = dict(env)
environment['ANGELIX_COMPILER_MESSAGES'] = messages
with cd(dir):
return_code = subprocess.call(cmd,
env=environment,
shell=True,
stderr=subproc_output,
stdout=subproc_output)
if return_code != 0 and not config['mute_warning']:
logger.warning("compilation of {} returned non-zero code".format(relpath(dir)))
if exists(messages):
with open(messages) as file:
lines = file.readlines()
if not config['mute_warning']:
for line in lines:
logger.warning("failed to build {}".format(relpath(line.strip())))
def build_with_cc(dir, cmd, stderr, cc, config):
env = dict(os.environ)
env['CC'] = cc
build_in_env(dir, cmd, stderr, config, env)
class Validation(Project):
def build(self):
logger.info('building {} source'.format(basename(self.dir)))
compile_start_time = time.time()
build_in_env(self.dir, self.build_cmd,
subprocess.DEVNULL if self.config['mute_build_message']
else self.subproc_output,
self.config)
compile_end_time = time.time()
compile_elapsed = compile_end_time - compile_start_time
statistics.data['time']['compilation'] += compile_elapsed
def export_compilation_db(self):
logger.info('building json compilation database from {} source'.format(basename(self.dir)))
compile_start_time = time.time()
build_in_env(self.dir,
'bear ' + self.build_cmd,
subprocess.DEVNULL if self.config['mute_build_message']
else self.subproc_output,
self.config)
compile_end_time = time.time()
compile_elapsed = compile_end_time - compile_start_time
statistics.data['time']['compilation'] += compile_elapsed
compilation_db_file = join(self.dir, 'compile_commands.json')
with open(compilation_db_file) as file:
compilation_db = json.load(file)
# making paths relative:
for item in compilation_db:
item['directory'] = relpath(item['directory'], self.dir)
item['file'] = relpath(item['file'], self.dir)
return compilation_db
class Frontend(Project):
def build(self):
logger.info('building {} source'.format(basename(self.dir)))
compile_start_time = time.time()
build_with_cc(self.dir,
self.build_cmd,
subprocess.DEVNULL if self.config['mute_build_message']
else self.subproc_output,
'angelix-compiler --test',
self.config)
compile_end_time = time.time()
compile_elapsed = compile_end_time - compile_start_time
statistics.data['time']['compilation'] += compile_elapsed
class Backend(Project):
def build(self):
logger.info('building {} source'.format(basename(self.dir)))
compile_start_time = time.time()
build_with_cc(self.dir,
self.build_cmd,
subprocess.DEVNULL if self.config['mute_build_message']
else self.subproc_output,
'angelix-compiler --klee',
self.config)
compile_end_time = time.time()
compile_elapsed = compile_end_time - compile_start_time
statistics.data['time']['compilation'] += compile_elapsed
| 38.375 | 99 | 0.605592 |
4a275af5c3d2c609c38a4ef1ab8da66221596ed5 | 225 | py | Python | gail_airl_ppo/network/__init__.py | tarawa/imitation | 409abfba1f60bca507c44de562bed135cc9f348f | [
"MIT"
] | null | null | null | gail_airl_ppo/network/__init__.py | tarawa/imitation | 409abfba1f60bca507c44de562bed135cc9f348f | [
"MIT"
] | null | null | null | gail_airl_ppo/network/__init__.py | tarawa/imitation | 409abfba1f60bca507c44de562bed135cc9f348f | [
"MIT"
] | null | null | null | from .policy import StateDependentPolicy, StateIndependentPolicy
from .value import StateFunction, StateActionFunction, TwinnedStateActionFunction
from .disc import GAILDiscrim, AIRLDiscrim, WGAILDiscrim, WGAILDiscrim_notanh
| 56.25 | 81 | 0.88 |
4a275b8cf8f7d35c1c0d28dca053cee66cae7da3 | 1,308 | py | Python | py/test/i18n/test_ui.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 3 | 2022-01-06T16:52:52.000Z | 2022-03-07T11:30:47.000Z | py/test/i18n/test_ui.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | null | null | null | py/test/i18n/test_ui.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 1 | 2021-10-24T01:47:22.000Z | 2021-10-24T01:47:22.000Z | # Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for methods of creating i18n UIs."""
from cros.factory.test.i18n import translation
def MakeI18nLabel(label):
"""Make an i18n label.
Args:
label: The string of the label, or the translation dict obtained by
``i18n._``.
Returns:
The HTML of label, that can be used in Goofy.
Example:
MakeI18nLabel(
i18n._(
'This is a label with name "{name}" and value {value}',
name=i18n._('example label name'),
value='value'))
"""
label = translation.Translated(label)
html = []
for locale in translation.LOCALES:
translated_label = label[locale]
html_class = 'goofy-label-' + locale
html.append(u'<span class="%s">%s</span>' % (html_class, translated_label))
return ''.join(html)
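# Rough sketch of the output (assuming, purely for illustration, that translation.LOCALES
# is ['en-US', 'zh-CN']): the docstring example would yield two spans such as
#   <span class="goofy-label-en-US">...</span><span class="goofy-label-zh-CN">...</span>
# and the stylesheet from GetStyleSheet() below hides every span except the one whose
# locale matches the active goofy-locale-* class on an ancestor element.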
def GetStyleSheet():
"""Return a stylesheet that can be used to style i18n labels properly."""
styles = []
for locale in translation.LOCALES:
styles.append("""
.goofy-label-{locale} {{
display: none;
}}
.goofy-locale-{locale} .goofy-label-{locale} {{
display: inline;
}}""".format(locale=locale))
return '\n'.join(styles)
| 26.693878 | 79 | 0.646789 |
4a275b90c1639cabf4cd659fc51b6857ad4fb938 | 491 | py | Python | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/axis/_showticklabels.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/axis/_showticklabels.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/axis/_showticklabels.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showticklabels", parent_name="indicator.gauge.axis", **kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 32.733333 | 88 | 0.655804 |
4a275c02f45d63d24f83857f778a60186fe1bf1c | 19,661 | py | Python | station_lmc.py | andreww5au/PaSD-client | 5ed7ff3e8b529c440fbfecb8c3159516285496c4 | [
"MIT"
] | null | null | null | station_lmc.py | andreww5au/PaSD-client | 5ed7ff3e8b529c440fbfecb8c3159516285496c4 | [
"MIT"
] | null | null | null | station_lmc.py | andreww5au/PaSD-client | 5ed7ff3e8b529c440fbfecb8c3159516285496c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Manage a single PaSD station - control and monitor the hardware via Modbus commands to the specified IP
address, and update the relevant tables in the PaSD database. Monitor the port state tables in that database, and
send updated information as needed to the hardware in the field.
"""
import argparse
from configparser import ConfigParser as conparser
import datetime
from datetime import timezone
import logging
import sys
import time
import psycopg2
from psycopg2 import extras
LOGFILE = 'station_lmc.log'
CPPATH = ['/usr/local/etc/pasd.conf', '/usr/local/etc/pasd-local.conf',
'./pasd.conf', './pasd-local.conf']
DEFAULT_STATION_NUMBER = 1
FNDH_STATE_QUERY = """
UPDATE pasd_fndh_state
SET mbrv = %(mbrv)s, pcbrv = %(pcbrv)s, cpuid = %(cpuid)s, chipid = %(chipid)s,
firmware_version = %(firmware_version)s, uptime = %(uptime)s, psu48v1_voltage = %(psu48v1_voltage)s,
psu48v2_voltage = %(psu48v2_voltage)s, psu5v_voltage = %(psu5v_voltage)s, psu48v_current = %(psu48v_current)s,
psu48v_temp = %(psu48v_temp)s, psu5v_temp = %(psu5v_temp)s, pcb_temp = %(pcb_temp)s,
outside_temp = %(outside_temp)s, status = %(status)s, indicator_state = %(indicator_state)s,
readtime = %(readtime)s
WHERE (station_id = %(station_id)s)
"""
FNDH_PORT_QUERY = """
UPDATE pasd_fndh_port_status
SET smartbox_number = %(smartbox_address)s, system_online = %(system_online)s,
locally_forced_on = %(locally_forced_on)s, locally_forced_off = %(locally_forced_off)s,
power_state = %(power_state)s, power_sense = %(power_sense)s, status_timestamp = %(status_timestamp)s
WHERE (station_id = %(station_id)s) AND (pdoc_number = %(port_number)s)
"""
SMARTBOX_STATE_QUERY = """
UPDATE pasd_smartbox_state
SET mbrv = %(mbrv)s, pcbrv = %(pcbrv)s, cpuid = %(cpuid)s, chipid = %(chipid)s,
firmware_version = %(firmware_version)s, uptime = %(uptime)s, incoming_voltage = %(incoming_voltage)s,
psu_voltage = %(psu_voltage)s, psu_temp = %(psu_temp)s, pcb_temp = %(pcb_temp)s,
outside_temp = %(outside_temp)s, status = %(status)s,
indicator_state = %(indicator_state)s, readtime = %(readtime)s, pdoc_number = %(pdoc_number)s
WHERE (station_id = %(station_id)s) AND (smartbox_number = %(modbus_address)s)
"""
SMARTBOX_PORT_QUERY = """
UPDATE pasd_smartbox_port_status
SET system_online = %(system_online)s, current_draw = %(current)s, locally_forced_on = %(locally_forced_on)s,
locally_forced_off = %(locally_forced_off)s, breaker_tripped = %(breaker_tripped)s,
power_state = %(power_state)s, status_timestamp = %(status_timestamp)s,
current_draw_timestamp = %(current_timestamp)s
WHERE (station_id = %(station_id)s) AND (smartbox_number = %(modbus_address)s) AND (port_number = %(port_number)s)
"""
LAST_STARTUP_ATTEMPT_TIME = 0 # Timestamp for the last time we tried to start up the station
STARTUP_RETRY_INTERVAL = 600 # If the station isn't active, but is meant to be, wait this long before retrying startup
LAST_SHUTDOWN_ATTEMPT_TIME = 0 # Timestamp for the last time we tried to shut down the station
SHUTDOWN_RETRY_INTERVAL = 600   # If the station is active, but isn't meant to be, wait this long before retrying shutdown
def initialise_db(db, stn):
"""
Make sure that all state rows in the database tables exist (with empty contents), so that future writes can just
    use 'update' queries instead of checking to see if they need to do 'insert' instead.
If more than one row exists for any FNDH/smartbox/port, delete all of the duplicates and create a new empty row.
:param db: Database connection object
:param stn: An instance of station.Station(), used to get the station number.
:return:
"""
with db:
with db.cursor() as curs:
curs.execute('SELECT COUNT(*) FROM pasd_stations WHERE (station_id = %s)', (stn.station_id,))
if curs.fetchone()[0] != 1: # No rows match, or more than one row matches:
curs.execute('DELETE FROM pasd_stations WHERE (station_id = %s)', (stn.station_id,))
curs.execute('INSERT INTO pasd_stations (station_id) VALUES (%s)', (stn.station_id,))
curs.execute('SELECT COUNT(*) FROM pasd_fndh_state WHERE (station_id = %s)', (stn.station_id,))
if curs.fetchone()[0] != 1: # No rows match, or more than one row matches:
curs.execute('DELETE FROM pasd_fndh_state WHERE (station_id = %s)', (stn.station_id,))
curs.execute('INSERT INTO pasd_fndh_state (station_id) VALUES (%s)', (stn.station_id,))
for pnum in range(1, 29):
curs.execute('SELECT COUNT(*) FROM pasd_fndh_port_status WHERE (station_id = %s) AND (pdoc_number = %s)',
(stn.station_id, pnum))
if curs.fetchone()[0] != 1: # No rows match, or more than one row matches:
curs.execute('DELETE FROM pasd_fndh_port_status WHERE (station_id = %s) AND (pdoc_number = %s)',
(stn.station_id, pnum))
curs.execute('INSERT INTO pasd_fndh_port_status (station_id, pdoc_number) VALUES (%s, %s)',
(stn.station_id, pnum))
for sb_num in range(1, 25):
curs.execute('SELECT COUNT(*) FROM pasd_smartbox_state WHERE (station_id = %s) AND (smartbox_number = %s)',
(stn.station_id, sb_num))
if curs.fetchone()[0] != 1: # No rows match, or more than one row matches:
curs.execute('DELETE FROM pasd_smartbox_state WHERE (station_id = %s) AND (smartbox_number = %s)',
(stn.station_id, sb_num))
curs.execute('INSERT INTO pasd_smartbox_state (station_id, smartbox_number) VALUES (%s, %s)',
(stn.station_id, sb_num))
for pnum in range(1, 13):
curs.execute('SELECT COUNT(*) FROM pasd_smartbox_port_status WHERE (station_id = %s) AND (smartbox_number = %s) AND (port_number = %s)',
(stn.station_id, sb_num, pnum))
if curs.fetchone()[0] != 1: # No rows match, or more than one row matches:
curs.execute('DELETE FROM pasd_smartbox_port_status WHERE (station_id = %s) AND (smartbox_number = %s) AND (port_number = %s)',
(stn.station_id, sb_num, pnum))
curs.execute('INSERT INTO pasd_smartbox_port_status (station_id, smartbox_number, port_number) VALUES (%s, %s, %s)',
(stn.station_id, sb_num, pnum))
def update_db(db, stn):
"""
Write current instance data to the database (FNDH state, all 28 FNDH port states, all 24 smartbox states,
    and all 288 smartbox port states).
:param db: Database connection object
:param stn: An instance of station.Station(), with contents to write the database.
:return:
"""
# FNDH state table:
with db: # Commit transaction when block exits
with db.cursor() as curs:
stn.fndh.station_id = stn.station_id
curs.execute(FNDH_STATE_QUERY, stn.fndh.__dict__)
fpdata_list = []
for pnum, port in stn.fndh.ports.items():
tmpdict = port.__dict__.copy()
        tmpdict['station_id'] = stn.station_id
fpdata_list.append(tmpdict)
# FNDH port table:
with db: # Commit transaction when block exits
with db.cursor() as curs:
psycopg2.extras.execute_batch(curs, FNDH_PORT_QUERY, fpdata_list)
# Smartbox port table
sb_data_list = [] # Will end up with 24 dicts, one for each smartbox state
sb_ports_data_list = [] # Will end up with 288 dicts, one for each port state
if stn.active: # If the station is active, we have real smartbox data to send
for sb_num, sb in stn.smartboxes.items():
sb.station_id = stn.station_id
sb_data_list.append(sb.__dict__)
            for pnum, port in sb.ports.items():
port.station_id = stn.station_id
sb_ports_data_list.append(port.__dict__)
else: # If the station is not active (smartboxes are all off), fill in empty smartbox data
for sb_num in range(1, 25):
for portnum in range(1, 13):
spdata = {'station_id':stn.station_id, 'modbus_address':sb_num, 'port_number':portnum,
                          'system_online':None, 'current':None, 'locally_forced_on':None,
'locally_forced_off':None, 'breaker_tripped':None,
'power_state':None, 'status_timestamp':datetime.datetime.now(timezone.utc),
'current_timestamp':None}
sb_ports_data_list.append(spdata)
with db: # Commit transaction when block exits
with db.cursor() as curs:
psycopg2.extras.execute_batch(curs, SMARTBOX_STATE_QUERY, sb_data_list)
psycopg2.extras.execute_batch(curs, SMARTBOX_PORT_QUERY, sb_ports_data_list)
def get_antenna_map(db, station_number=DEFAULT_STATION_NUMBER):
"""
Query the database to find the antenna->smartbox/port mapping, and return it as a dict of dicts.
The returned dict has smartbox address (1-24) as key. The values are dicts with port number (1-12) as key,
and antenna number (1-256) as value (or None). All 288 possible smartbox ports must be in the antenna map.
:param db: Database connection object
:param station_number: Station ID (1-9999)
:return: Antenna map (dict of dicts)
"""
# Create antenna map structure with all 288 ports set to None, to make sure none are missing
ant_map = {}
for sid in range(1, 25):
ant_map[sid] = {pid:None for pid in range(1, 13)}
with db: # Commit transaction when block exits
with db.cursor() as curs:
query = """SELECT antenna_number, smartbox_number, port_number
FROM pasd_antenna_portmap
WHERE (station_id=%s) and begintime < now() and endtime > now()
"""
curs.execute(query, (station_number,))
# Fill in mapping data from the query
for row in curs:
antenna_number, smartbox_number, port_number = row
ant_map[smartbox_number][port_number] = antenna_number
return ant_map
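# Illustrative shape of the returned antenna map (antenna numbers here are invented):
#   {1: {1: 37, 2: None, ..., 12: 12},
#    2: {1: None, ..., 12: 48},
#    ...
#    24: {1: 201, ..., 12: None}}
# All 288 smartbox/port slots are always present; ports with no antenna stay None.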
def get_all_port_configs(db, station_number=DEFAULT_STATION_NUMBER):
"""
Query the database to get the smartbox port state dictionary, for all smartboxes.
Result is a tuple of two dictionaries - the first contains all 28 FNDH port configs, the second all 288 smartbox
port configs.
    The FNDH port config dictionary is a dict with port number (1-28) as key, and a list of two booleans or 0/1
integers as value, where the first item is the 'desire_enabled_online', and the second is the
'desire_enabled_offline'.
The smartbox port dictionary has smartbox number as the key. Each value is a dict with port number (1-12) as key, and a
    list of two booleans or 0/1 integers as value, where the first item is the 'desire_enabled_online', and the second
is the 'desire_enabled_offline'.
:param db: Database connection object
:param station_number: Station ID (1-9999)
:return: port configuration for that smartbox (dict of dicts)
"""
with db: # Commit transaction when block exits
with db.cursor() as curs:
# Read FNDH port config for this station:
query = """SELECT pdoc_number, desire_enabled_online, desire_enabled_offline
FROM pasd_fndh_port_status
WHERE station_id=%s"""
curs.execute(query, (station_number,))
fndhpc = {i:[False, False] for i in range(1, 29)}
for row in curs:
pdoc_number, desire_enabled_online, desire_enabled_offline = row
fndhpc[pdoc_number] = bool(desire_enabled_online), bool(desire_enabled_offline)
# Read all smartbox port configs for this station:
query = """SELECT smartbox_number, port_number, desire_enabled_online, desire_enabled_offline
FROM pasd_smartbox_port_status
WHERE station_id=%s"""
curs.execute(query, (station_number,))
sbpc = {}
for sid in range(1, 25):
sbpc[sid] = {i:[False, False] for i in range(1, 13)}
for row in curs:
smartbox_number, port_number, desire_enabled_online, desire_enabled_offline = row
sbpc[smartbox_number][port_number] = bool(desire_enabled_online), bool(desire_enabled_offline)
return fndhpc, sbpc
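# Illustrative shapes of the two return values (values invented): fndhpc maps PDoC port
# number to a (desire_enabled_online, desire_enabled_offline) pair, e.g.
#   {1: (True, False), 2: (False, False), ..., 28: (True, True)}
# and sbpc nests the same pair per smartbox then port, e.g.
#   {1: {1: (True, True), ..., 12: (False, False)}, ..., 24: {...}}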
def update_station_state(db, stn):
"""
Write the current station state (stn.active, stn.status, etc) to the 'stations' table in the database.
:param db: Database connection object
:param stn: An instance of station.Station()
:return: The current value of the desired_active row in the stations table entry for this station.
"""
query = "UPDATE pasd_stations SET active = %s, status = %s, status_timestamp = %s WHERE station_id = %s"
with db:
with db.cursor() as curs:
            curs.execute(query, (stn.active, stn.status, datetime.datetime.now(timezone.utc), stn.station_id))
curs.execute("SELECT desired_active FROM pasd_stations WHERE station_id = %s", (stn.station_id,))
rows = curs.fetchall()
if len(rows) > 1:
stn.logger.critical('Multiple records in stations table for station ID=%d' % (stn.station_id))
sys.exit()
else:
desired_active = rows[0][0]
return desired_active
def main_loop(db, stn):
"""
Run forever in a loop
-Query the field hardware to get all the current sensor and port parameters and update the instance data
-Use the instance data to update the database sensor and port parameters
-Query the database to look for commanded changes in station or port state
-Write the commanded state data to the field hardware if it's different
-Query the stations table to see if we're meant to start up, or shut down
:param db: Database connection object
:param stn: An instance of station.Station()
:return:
"""
while not stn.wants_exit:
# Query the field hardware to get all the current sensor and port parameters and update the instance data
stn.poll_data() # If station is not active, only FNDH data can be polled
# Use the instance data to update the database sensor and port parameters
update_db(db, stn=stn)
# Query the database to see if the desired port config is different to the polled port config
fndhpc, sbpc = get_all_port_configs(db, station_number=stn.station_id)
needs_write = False
for pid in stn.fndh.ports.keys():
p = stn.fndh.ports[pid]
desire_enabled_online, desire_enabled_offline = fndhpc[pid]
if (p.desire_enabled_online != desire_enabled_online):
p.desire_enabled_online = desire_enabled_online
needs_write = True
if (p.desire_enabled_offline != desire_enabled_offline):
p.desire_enabled_offline = desire_enabled_offline
needs_write = True
if needs_write:
stn.fndh.write_portconfig()
time.sleep(1.0) # Allow time for a smartbox to boot, if it's being turned on here.
for sid in stn.smartboxes.keys():
needs_write = False
for pid in stn.smartboxes[sid].ports.keys():
p = stn.smartboxes[sid].ports[pid]
desire_enabled_online, desire_enabled_offline = sbpc[sid][pid]
if (p.desire_enabled_online != desire_enabled_online):
p.desire_enabled_online = desire_enabled_online
needs_write = True
if (p.desire_enabled_offline != desire_enabled_offline):
p.desire_enabled_offline = desire_enabled_offline
needs_write = True
if needs_write:
stn.smartboxes[sid].write_portconfig()
desired_active = update_station_state(db, stn=stn)
if ( (desired_active and
(not stn.active) and
((time.time() - LAST_STARTUP_ATTEMPT_TIME) > STARTUP_RETRY_INTERVAL)) ):
stn.startup()
elif ( (not desired_active) and
stn.active and
((time.time() - LAST_SHUTDOWN_ATTEMPT_TIME) > SHUTDOWN_RETRY_INTERVAL) ):
stn.shutdown()
if __name__ == '__main__':
CP = conparser(defaults={})
CPfile = CP.read(CPPATH)
if not CPfile:
print("None of the specified configuration files found by mwaconfig.py: %s" % (CPPATH,))
parser = argparse.ArgumentParser(description='Run a PaSD station',
epilog='Run this as "python -i %s" to drop into the Python prompt after starting up.' % sys.argv[0])
parser.add_argument('--host', dest='host', default=None,
help='Hostname of an ethernet-serial gateway, eg 134.7.50.185')
parser.add_argument('--device', dest='device', default=None,
help='Serial port device name, eg /dev/ttyS0 or COM6')
    parser.add_argument('--id', '--station_id', dest='station_id', default=DEFAULT_STATION_NUMBER, type=int,
                        help='Station number (1-9999)')
parser.add_argument('--debug', dest='debug', default=False, action='store_true',
help='If given, drop to the DEBUG log level, otherwise use INFO')
args = parser.parse_args()
if (args.host is None) and (args.device is None):
args.host = 'pasd-fndh'
if args.debug:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
config = CP['station_%03d' % args.station_id]
dbuser = config['dbuser']
dbhost = config['dbhost']
dbpass = config['dbpass']
dbname = config['dbname']
db = psycopg2.connect(user=dbuser, password=dbpass, host=dbhost, database=dbname)
fh = logging.FileHandler(filename=LOGFILE, mode='w')
fh.setLevel(logging.DEBUG) # All log messages go to the log file
sh = logging.StreamHandler()
sh.setLevel(loglevel) # Some or all log messages go to the console
logging.basicConfig(handlers=[fh, sh],
level=logging.DEBUG,
format='%(levelname)s:%(name)s %(created)14.3f - %(message)s')
from pasd import transport
from pasd import station
tlogger = logging.getLogger('T')
if loglevel == logging.DEBUG:
print('Setting transport log level to info, DEBUG is very spammy. All other logging is at DEBUG level.')
tlogger.setLevel(logging.INFO)
conn = transport.Connection(hostname=args.host, devicename=args.device, multidrop=False, logger=tlogger)
fndhpc, sbpc = get_all_port_configs(db, station_number=args.station_id)
slogger = logging.getLogger('ST')
s = station.Station(conn=conn,
station_id=args.station_id,
antenna_map=get_antenna_map(db, args.station_id),
portconfig_fndh=fndhpc,
portconfig_smartboxes=sbpc,
logger=slogger)
print('Starting up entire station as "s" - FNDH on address 31, SMARTboxes on addresses 1-24.')
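    # This script is intended to be started with "python -i" (see the argparse epilog above);
    # a typical next step - an assumption, not part of the original file - is to kick off the
    # polling loop interactively with:
    #   main_loop(db, s)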
| 48.90796 | 156 | 0.641676 |
4a275c46532f4459879f3d271d24bb8e70571c0f | 874 | py | Python | data_science_layer/preprocessing/log_tranformation.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | data_science_layer/preprocessing/log_tranformation.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | data_science_layer/preprocessing/log_tranformation.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | from data_science_layer.preprocessing.abstract_pre_processor import AbstractPreProcessor
import pandas as pd
import numpy as np
class LogTransformation(AbstractPreProcessor):
def fit_transform(self, data, y=None):
return self._process_data(data=data)
def fit(self, data, y=None):
return None
def transform(self, data, y=None):
return self._process_data(data=data)
def _process_data(self, data):
try:
output = np.log10(data)
except Exception as e:
print(e)
output = np.log10(np.float32(data.values))
output = self._check_output(data, output)
return output
def _check_output(self, input, output):
if isinstance(input, pd.DataFrame):
output = pd.DataFrame(data=output, index=input.index, columns=input.columns)
return output
| 28.193548 | 88 | 0.661327 |
4a275ccb627103219fac829dcd23cad8a7cedfe1 | 50,319 | py | Python | BtS/Assets/Python/CvAdvisorUtils.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | BtS/Assets/Python/CvAdvisorUtils.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | BtS/Assets/Python/CvAdvisorUtils.py | f1rpo/Civ4CE | ba64c3545b479887739ad0ff78605b51b6fa57f9 | [
"CNRI-Python"
] | null | null | null | ## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
##
## CvAdvisorUtils
from CvPythonExtensions import *
import PyHelpers
gc = CyGlobalContext()
localText = CyTranslator()
PyPlayer = PyHelpers.PyPlayer
g_iAdvisorNags = 0
g_listNoLiberateCities = []
def resetAdvisorNags():
global g_iAdvisorNags
g_iAdvisorNags = 0
def resetNoLiberateCities():
global g_listNoLiberateCities
g_listNoLiberateCities = []
def featPopup(iPlayer):
if (not gc.getPlayer(iPlayer).isOption(PlayerOptionTypes.PLAYEROPTION_ADVISOR_POPUPS)):
return False
if (not gc.getPlayer(iPlayer).isHuman()):
return False
if (gc.getGame().isNetworkMultiPlayer()):
return False
if (gc.getGame().getElapsedGameTurns() == 0):
return False
return True
def populationFeat(iPlayer, eFeat, szText):
if (not gc.getPlayer(iPlayer).isFeatAccomplished(eFeat)):
gc.getPlayer(iPlayer).setFeatAccomplished(eFeat, True)
if (featPopup(iPlayer)):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(eFeat)
popupInfo.setText(localText.getText(szText, (gc.getPlayer(iPlayer).getCivilizationDescriptionKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
def unitBuiltFeats(pCity, pUnit):
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_ARCHER)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_ARCHER")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_ARCHER, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_ARCHER)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_ARCHER", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_MOUNTED)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_MOUNTED")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_MOUNTED, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_MOUNTED)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_MOUNTED", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_MELEE)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_MELEE")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_MELEE, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_MELEE)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_MELEE", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_SIEGE)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_SIEGE")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_SIEGE, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_SIEGE)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_SIEGE", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_GUN)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_GUN")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_GUN, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_GUN)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_GUN", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_ARMOR)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_ARMOR")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_ARMOR, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_ARMOR)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_ARMOR", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_HELICOPTER)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_HELICOPTER")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_HELICOPTER, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_HELICOPTER)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_HELICOPTER", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_NAVAL)):
if (pUnit.getUnitCombatType() == gc.getInfoTypeForString("UNITCOMBAT_NAVAL")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNITCOMBAT_NAVAL, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNITCOMBAT_NAVAL)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNITCOMBAT_NAVAL", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNIT_PRIVATEER)):
if (pUnit.getUnitType() == gc.getInfoTypeForString("UNIT_PRIVATEER")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNIT_PRIVATEER, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNIT_PRIVATEER)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNIT_PRIVATEER", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_UNIT_SPY)):
if (pUnit.getUnitType() == gc.getInfoTypeForString("UNIT_SPY")):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_UNIT_SPY, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_UNIT_SPY)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_UNIT_SPY", (pUnit.getNameKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
def buildingBuiltFeats(pCity, iBuildingType):
if (not gc.getPlayer(pCity.getOwner()).isFeatAccomplished(FeatTypes.FEAT_NATIONAL_WONDER)):
if (isNationalWonderClass(gc.getBuildingInfo(iBuildingType).getBuildingClassType())):
gc.getPlayer(pCity.getOwner()).setFeatAccomplished(FeatTypes.FEAT_NATIONAL_WONDER, True)
if (featPopup(pCity.getOwner()) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_NATIONAL_WONDER)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_NATIONAL_WONDER", (gc.getBuildingInfo(iBuildingType).getTextKey(), pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(pCity.getOwner())
def endTurnFeats(iPlayer):
lRealPopulation = gc.getPlayer(iPlayer).getRealPopulation()
if (lRealPopulation > 500000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_HALF_MILLION, "TXT_KEY_FEAT_HALF_MILLION")
if (lRealPopulation > 1000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_1_MILLION, "TXT_KEY_FEAT_1_MILLION")
if (lRealPopulation > 2000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_2_MILLION, "TXT_KEY_FEAT_2_MILLION")
if (lRealPopulation > 5000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_5_MILLION, "TXT_KEY_FEAT_5_MILLION")
if (lRealPopulation > 10000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_10_MILLION, "TXT_KEY_FEAT_10_MILLION")
if (lRealPopulation > 20000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_20_MILLION, "TXT_KEY_FEAT_20_MILLION")
if (lRealPopulation > 50000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_50_MILLION, "TXT_KEY_FEAT_50_MILLION")
if (lRealPopulation > 100000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_100_MILLION, "TXT_KEY_FEAT_100_MILLION")
if (lRealPopulation > 200000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_200_MILLION, "TXT_KEY_FEAT_200_MILLION")
if (lRealPopulation > 500000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_500_MILLION, "TXT_KEY_FEAT_500_MILLION")
if (lRealPopulation > 1000000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_1_BILLION, "TXT_KEY_FEAT_1_BILLION")
if (lRealPopulation > 2000000000):
populationFeat(iPlayer, FeatTypes.FEAT_POPULATION_2_BILLION, "TXT_KEY_FEAT_2_BILLION")
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_TRADE_ROUTE)):
apCityList = PyPlayer(iPlayer).getCityList()
for pCity in apCityList:
if (not pCity.isCapital()):
if (pCity.isConnectedToCapital(iPlayer)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_TRADE_ROUTE, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_TRADE_ROUTE)
popupInfo.setData2(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_TRADE_ROUTE", (pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
break
pCapitalCity = gc.getPlayer(iPlayer).getCapitalCity()
if (not pCapitalCity.isNone()):
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_COPPER_CONNECTED)):
iBonus = gc.getInfoTypeForString("BONUS_COPPER")
if (iBonus != BonusTypes.NO_BONUS):
if (pCapitalCity.hasBonus(iBonus)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_COPPER_CONNECTED, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_COPPER_CONNECTED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_COPPER_CONNECTED", ()))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_HORSE_CONNECTED)):
iBonus = gc.getInfoTypeForString("BONUS_HORSE")
if (iBonus != BonusTypes.NO_BONUS):
if (pCapitalCity.hasBonus(iBonus)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_HORSE_CONNECTED, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_HORSE_CONNECTED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_HORSE_CONNECTED", ()))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_IRON_CONNECTED)):
iBonus = gc.getInfoTypeForString("BONUS_IRON")
if (iBonus != BonusTypes.NO_BONUS):
if (pCapitalCity.hasBonus(iBonus)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_IRON_CONNECTED, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_IRON_CONNECTED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_IRON_CONNECTED", ()))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_LUXURY_CONNECTED)):
for iI in range(gc.getNumBonusInfos()):
if (gc.getBonusInfo(iI).getHappiness() > 0):
if (pCapitalCity.hasBonus(iI)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_LUXURY_CONNECTED, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_LUXURY_CONNECTED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_LUXURY_CONNECTED", (gc.getBonusInfo(iI).getTextKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
break
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_FOOD_CONNECTED)):
for iI in range(gc.getNumBonusInfos()):
if (gc.getBonusInfo(iI).getHealth() > 0):
if (pCapitalCity.hasBonus(iI)):
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_FOOD_CONNECTED, True)
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_FOOD_CONNECTED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_FOOD_CONNECTED", (gc.getBonusInfo(iI).getTextKey(), )))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
break
if (not gc.getPlayer(iPlayer).isFeatAccomplished(FeatTypes.FEAT_CORPORATION_ENABLED)):
for iI in range(gc.getNumBuildingInfos()):
eCorporation = gc.getBuildingInfo(iI).getFoundsCorporation()
if eCorporation != -1 and not gc.getGame().isCorporationFounded(eCorporation):
bValid = true
eTeam = gc.getPlayer(iPlayer).getTeam()
if not gc.getTeam(eTeam).isHasTech(gc.getBuildingInfo(iI).getPrereqAndTech()):
bValid = false
if bValid:
for iPrereq in range(gc.getDefineINT("NUM_BUILDING_AND_TECH_PREREQS")):
if not gc.getTeam(eTeam).isHasTech(gc.getBuildingInfo(iI).getPrereqAndTechs(iPrereq)):
bValid = false
break
if bValid:
gc.getPlayer(iPlayer).setFeatAccomplished(FeatTypes.FEAT_CORPORATION_ENABLED, True)
szBonusList = u""
bFirst = true
for iPrereq in range(gc.getDefineINT("NUM_CORPORATION_PREREQ_BONUSES")):
eBonus = gc.getCorporationInfo(eCorporation).getPrereqBonus(iPrereq)
if eBonus != -1:
if bFirst:
bFirst = false
else:
szBonusList += localText.getText("TXT_KEY_OR", ())
szBonusList += gc.getBonusInfo(eBonus).getDescription()
szFounder = u""
for iUnit in range(gc.getNumUnitInfos()):
if gc.getUnitInfo(iUnit).getBuildings(iI) or gc.getUnitInfo(iUnit).getForceBuildings(iI):
szFounder = gc.getUnitInfo(iUnit).getTextKey()
break
if (featPopup(iPlayer) and (gc.getGame().getStartYear() == gc.getDefineINT("START_YEAR"))):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(FeatTypes.FEAT_CORPORATION_ENABLED)
popupInfo.setData2(pCapitalCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_FEAT_CORPORATION_ENABLED", (gc.getCorporationInfo(eCorporation).getTextKey(), szFounder, szBonusList)))
popupInfo.setOnClickedPythonCallback("featAccomplishedOnClickedCallback")
popupInfo.setOnFocusPythonCallback("featAccomplishedOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_OK", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_FEAT_ACCOMPLISHED_MORE", ()), "")
popupInfo.addPopup(iPlayer)
break
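# cityAdvise queues at most two advisor nags per pass: each popup added below bumps
# g_iAdvisorNags, the check at the top of the function bails out once it reaches 2, and
# resetAdvisorNags() (defined above) presumably clears the counter between advisor passes.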
def cityAdvise(pCity, iPlayer):
global g_iAdvisorNags
if (g_iAdvisorNags >= 2):
return
if (pCity.isDisorder()):
return
if (gc.getPlayer(iPlayer).isOption(PlayerOptionTypes.PLAYEROPTION_ADVISOR_POPUPS) and gc.getPlayer(iPlayer).isHuman() and not gc.getGame().isNetworkMultiPlayer()):
if (gc.getGame().getGameTurn() % 40 == pCity.getGameTurnFounded() % 40):
if (not pCity.getID() in g_listNoLiberateCities):
eLiberationPlayer = pCity.getLiberationPlayer(false)
if (eLiberationPlayer != -1):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_POPUP_LIBERATION_DEMAND", (pCity.getNameKey(), gc.getPlayer(eLiberationPlayer).getCivilizationDescriptionKey(), gc.getPlayer(eLiberationPlayer).getNameKey())))
popupInfo.setOnClickedPythonCallback("liberateOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_listNoLiberateCities.append(pCity.getID())
g_iAdvisorNags += 1
elif (gc.getPlayer(iPlayer).canSplitEmpire() and gc.getPlayer(iPlayer).canSplitArea(pCity.area().getID()) and pCity.AI_cityValue() < 0):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setText(localText.getText("TXT_KEY_POPUP_COLONY_DEMAND", (pCity.getNameKey(), )))
popupInfo.setOnClickedPythonCallback("colonyOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_listNoLiberateCities.append(pCity.getID())
g_iAdvisorNags += 1
if (pCity.isProduction()):
if (not pCity.isProductionUnit() and (pCity.getOrderQueueLength() <= 1)):
if (gc.getGame().getGameTurn() + 1) % 40 == pCity.getGameTurnFounded() % 40:
if ((gc.getGame().getElapsedGameTurns() < 200) and (pCity.getPopulation() > 2) and (gc.getPlayer(iPlayer).AI_totalAreaUnitAIs(pCity.area(), UnitAITypes.UNITAI_SETTLE) == 0) and not gc.getPlayer(iPlayer).AI_isFinancialTrouble() and (pCity.area().getBestFoundValue(iPlayer) > 0)):
iBestValue = 0
eBestUnit = UnitTypes.NO_UNIT
for iI in range(gc.getNumUnitClassInfos()):
if (not isLimitedUnitClass(iI)):
eLoopUnit = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationUnits(iI)
if (eLoopUnit != UnitTypes.NO_UNIT):
if (gc.getUnitInfo(eLoopUnit).getDomainType() == DomainTypes.DOMAIN_LAND):
if pCity.canTrain(eLoopUnit, False, False):
if (pCity.getFirstUnitOrder(eLoopUnit) == -1):
iValue = gc.getPlayer(iPlayer).AI_unitValue(eLoopUnit, UnitAITypes.UNITAI_SETTLE, pCity.area())
if (iValue > iBestValue):
iBestValue = iValue
eBestUnit = eLoopUnit
if (eBestUnit != UnitTypes.NO_UNIT):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_TRAIN)
popupInfo.setData3(eBestUnit)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_UNIT_SETTLE_DEMAND", (gc.getUnitInfo(eBestUnit).getTextKey(), )))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (gc.getGame().getGameTurn() + 5) % 40 == pCity.getGameTurnFounded() % 40:
if ((pCity.getPopulation() > 1) and (pCity.countNumImprovedPlots() == 0) and (pCity.AI_countBestBuilds(pCity.area()) > 3)):
iBestValue = 0
eBestUnit = UnitTypes.NO_UNIT
for iI in range(gc.getNumUnitClassInfos()):
if (not isLimitedUnitClass(iI)):
eLoopUnit = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationUnits(iI)
if (eLoopUnit != UnitTypes.NO_UNIT):
if (gc.getUnitInfo(eLoopUnit).getDomainType() == DomainTypes.DOMAIN_LAND):
if pCity.canTrain(eLoopUnit, False, False):
if (pCity.getFirstUnitOrder(eLoopUnit) == -1):
iValue = gc.getPlayer(iPlayer).AI_unitValue(eLoopUnit, UnitAITypes.UNITAI_WORKER, pCity.area())
if (iValue > iBestValue):
iBestValue = iValue
eBestUnit = eLoopUnit
if (eBestUnit != UnitTypes.NO_UNIT):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_TRAIN)
popupInfo.setData3(eBestUnit)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_UNIT_WORKER_DEMAND", (pCity.getNameKey(), gc.getUnitInfo(eBestUnit).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (gc.getGame().getGameTurn() + 9) % 40 == pCity.getGameTurnFounded() % 40:
if (pCity.plot().getNumDefenders(iPlayer) == 0):
iBestValue = 0
eBestUnit = UnitTypes.NO_UNIT
for iI in range(gc.getNumUnitClassInfos()):
if (not isLimitedUnitClass(iI)):
eLoopUnit = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationUnits(iI)
if (eLoopUnit != UnitTypes.NO_UNIT):
if (gc.getUnitInfo(eLoopUnit).getDomainType() == DomainTypes.DOMAIN_LAND):
if pCity.canTrain(eLoopUnit, False, False):
iValue = (gc.getPlayer(iPlayer).AI_unitValue(eLoopUnit, UnitAITypes.UNITAI_CITY_DEFENSE, pCity.area()) * 2)
iValue += gc.getPlayer(iPlayer).AI_unitValue(eLoopUnit, UnitAITypes.UNITAI_ATTACK, pCity.area())
if (iValue > iBestValue):
iBestValue = iValue
eBestUnit = eLoopUnit
if (eBestUnit != UnitTypes.NO_UNIT):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_TRAIN)
popupInfo.setData3(eBestUnit)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_UNIT_DEFENSE_DEMAND", (pCity.getNameKey(), gc.getUnitInfo(eBestUnit).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (gc.getGame().getGameTurn() + 12) % 40 == pCity.getGameTurnFounded() % 40:
if ((gc.getPlayer(iPlayer).AI_totalAreaUnitAIs(pCity.area(), UnitAITypes.UNITAI_MISSIONARY) == 0) and (gc.getTeam(gc.getPlayer(iPlayer).getTeam()).getAtWarCount(True) == 0)):
eStateReligion = gc.getPlayer(iPlayer).getStateReligion()
if (eStateReligion != ReligionTypes.NO_RELIGION):
if (gc.getPlayer(iPlayer).getHasReligionCount(eStateReligion) < (gc.getPlayer(iPlayer).getNumCities() / 2)):
iBestValue = 0
eBestUnit = UnitTypes.NO_UNIT
for iI in range(gc.getNumUnitClassInfos()):
eLoopUnit = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationUnits(iI)
if (eLoopUnit != UnitTypes.NO_UNIT):
if (gc.getUnitInfo(eLoopUnit).getDomainType() == DomainTypes.DOMAIN_LAND):
if (gc.getUnitInfo(eLoopUnit).getReligionSpreads(eStateReligion)):
if pCity.canTrain(eLoopUnit, False, False):
iValue = gc.getPlayer(iPlayer).AI_unitValue(eLoopUnit, UnitAITypes.UNITAI_MISSIONARY, pCity.area())
if (iValue > iBestValue):
iBestValue = iValue
eBestUnit = eLoopUnit
if (eBestUnit != UnitTypes.NO_UNIT):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_TRAIN)
popupInfo.setData3(eBestUnit)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_MISSIONARY_DEMAND", (gc.getReligionInfo(eStateReligion).getTextKey(), gc.getUnitInfo(eBestUnit).getTextKey(), pCity.getNameKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (not pCity.isProductionBuilding() and (pCity.getOrderQueueLength() <= 1)):
if (pCity.healthRate(False, 0) < 0):
if (gc.getGame().getGameTurn() + 2) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getHealth() > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getHealth()
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_UNHEALTHY_CITIZENS_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHEALTHY_DO_SO_NEXT", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHEALTHY_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHEALTHY_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.angryPopulation(0) > 0):
if (gc.getGame().getGameTurn() + 3) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getHappiness() > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getHappiness()
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_UNHAPPY_CITIZENS_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHAPPY_DO_SO_NEXT", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHAPPY_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_UNHEALTHY_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
    if ((gc.getGame().getGameTurn() < 100) and (gc.getTeam(gc.getPlayer(iPlayer).getTeam()).getHasMetCivCount(True) > 0) and (pCity.getBuildingDefense() == 0)):
if (gc.getGame().getGameTurn() + 4) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getDefenseModifier() > pCity.getNaturalDefense()):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getDefenseModifier()
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_BUILDING_DEFENSE_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.getMaintenance() >= 8):
if (gc.getGame().getGameTurn() + 6) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getMaintenanceModifier() < 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getMaintenanceModifier()
if (iValue < iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_MAINTENANCE_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.getCommerceRate(CommerceTypes.COMMERCE_CULTURE) == 0 and not pCity.isOccupation()):
if (gc.getGame().getGameTurn() + 7) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getObsoleteSafeCommerceChange(CommerceTypes.COMMERCE_CULTURE) > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getObsoleteSafeCommerceChange(CommerceTypes.COMMERCE_CULTURE)
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_CULTURE_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.getBaseCommerceRate(CommerceTypes.COMMERCE_GOLD) > 10):
if (gc.getGame().getGameTurn() + 8) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getCommerceModifier(CommerceTypes.COMMERCE_GOLD) > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getCommerceModifier(CommerceTypes.COMMERCE_GOLD)
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_GOLD_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.getBaseCommerceRate(CommerceTypes.COMMERCE_RESEARCH) > 10):
if (gc.getGame().getGameTurn() + 10) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getCommerceModifier(CommerceTypes.COMMERCE_RESEARCH) > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getCommerceModifier(CommerceTypes.COMMERCE_RESEARCH)
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_RESEARCH_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
if (pCity.countNumWaterPlots() > 10):
if (gc.getGame().getGameTurn() + 11) % 40 == pCity.getGameTurnFounded() % 40:
iBestValue = 0
eBestBuilding = BuildingTypes.NO_BUILDING
for iI in range(gc.getNumBuildingClassInfos()):
if (not isLimitedWonderClass(iI)):
eLoopBuilding = gc.getCivilizationInfo(gc.getPlayer(iPlayer).getCivilizationType()).getCivilizationBuildings(iI)
if (eLoopBuilding != BuildingTypes.NO_BUILDING):
if (gc.getBuildingInfo(eLoopBuilding).getSeaPlotYieldChange(YieldTypes.YIELD_FOOD) > 0):
if pCity.canConstruct(eLoopBuilding, False, False, False):
iValue = gc.getBuildingInfo(eLoopBuilding).getSeaPlotYieldChange(YieldTypes.YIELD_FOOD)
if (iValue > iBestValue):
iBestValue = iValue
eBestBuilding = eLoopBuilding
if (eBestBuilding != BuildingTypes.NO_BUILDING):
popupInfo = CyPopupInfo()
popupInfo.setButtonPopupType(ButtonPopupTypes.BUTTONPOPUP_PYTHON)
popupInfo.setData1(pCity.getID())
popupInfo.setData2(OrderTypes.ORDER_CONSTRUCT)
popupInfo.setData3(eBestBuilding)
popupInfo.setText(localText.getText("TXT_KEY_POPUP_WATER_FOOD_DEMAND", (pCity.getNameKey(), gc.getBuildingInfo(eBestBuilding).getTextKey())))
popupInfo.setOnClickedPythonCallback("cityWarningOnClickedCallback")
popupInfo.setOnFocusPythonCallback("cityWarningOnFocusCallback")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_AGREE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_REFUSE", ()), "")
popupInfo.addPythonButton(localText.getText("TXT_KEY_POPUP_DEMAND_EXAMINE", ()), "")
popupInfo.addPopup(iPlayer)
g_iAdvisorNags += 1
| 49.722332 | 284 | 0.717522 |
4a275d87c5dba78fd0df02729a0ea463a2fab4f1 | 1,661 | py | Python | tests/test_app.py | mpdude/thumbor | 21799d4731ed0901dcd85a3025f6836412333e09 | [
"MIT"
] | 5 | 2018-09-15T07:57:21.000Z | 2019-06-04T08:02:10.000Z | tests/test_app.py | mpdude/thumbor | 21799d4731ed0901dcd85a3025f6836412333e09 | [
"MIT"
] | 3 | 2020-12-02T15:23:46.000Z | 2021-04-13T15:44:34.000Z | tests/test_app.py | mpdude/thumbor | 21799d4731ed0901dcd85a3025f6836412333e09 | [
"MIT"
] | 1 | 2021-07-29T19:14:53.000Z | 2021-07-29T19:14:53.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from unittest import TestCase
import mock
from preggy import expect
from thumbor.app import (
ThumborServiceApp
)
from thumbor.url import Url
class AppTestCase(TestCase):
def test_can_create_app(self):
ctx = mock.Mock()
app = ThumborServiceApp(ctx)
expect(app).not_to_be_null()
expect(app.context).to_equal(ctx)
def test_can_get_handlers(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=False,
USE_BLACKLIST=False,
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(2)
expect(handlers[0][0]).to_equal(r'/healthcheck')
expect(handlers[1][0]).to_equal(Url.regex())
def test_can_get_handlers_with_upload(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=True,
USE_BLACKLIST=False,
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(4)
def test_can_get_handlers_with_blacklist(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=False,
USE_BLACKLIST=True,
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(3)
| 25.166667 | 56 | 0.606863 |
4a275fb0ba26af2f5482c9cc1ac99ae55dfc23c1 | 1,802 | py | Python | tools/localtest.py | kinsaurralde/lights | a9673188875dbd91bb8822c1920002a4f1c2ec5b | [
"MIT"
] | null | null | null | tools/localtest.py | kinsaurralde/lights | a9673188875dbd91bb8822c1920002a4f1c2ec5b | [
"MIT"
] | null | null | null | tools/localtest.py | kinsaurralde/lights | a9673188875dbd91bb8822c1920002a4f1c2ec5b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import time
import subprocess
import atexit
try:
import yaml # 3.6
except ImportError:
import ruamel.yaml as yaml # 3.7
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, help="Path to config file", default="configs/localtest.yaml",
)
parser.add_argument("-b", "--buildfolder", type=str, help="Path to buildfolder", default="../build/")
args = parser.parse_args()
def load_yaml(path):
with open(path) as open_file:
return yaml.safe_load(open_file)
def createControllerServer(port):
print(f"Creating controller server on port {port}")
subprocess.run(
[args.buildfolder + "raspberrypi/rpi_startup.sh", "--test", "--port", str(port),], check=True,
)
def createWebappServer():
print("Creating webapp server")
command = ["sudo", "python3", "app.py", "--test", "--debug"]
subprocess.run(command, cwd=f"{args.buildfolder + 'webapp'}", check=True)
def shutdownServers(name):
print(f"Shutdown {name} servers")
command = (
"sudo screen -ls "
+ name
+ " | grep -E '\s+[0-9]+\.' | awk -F ' ' '{print $1}' | "
+ "while read s; do sudo screen -XS $s quit; done"
)
subprocess.run(command, shell=True, check=True)
def setup():
config = load_yaml(args.config)
webapp_config_name = config["webapp_config"]
webapp_config = load_yaml(args.buildfolder + "webapp/config/controllers_" + webapp_config_name + ".yaml")
for i in range(len(webapp_config["controllers"])):
if webapp_config["controllers"][i]["active"] == "active":
createControllerServer(6000 + i)
time.sleep(3)
createWebappServer()
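# A minimal sketch of the two YAML files setup() reads. Only the keys used above
# ("webapp_config", "controllers", "active") come from this script; the value
# "default" and the second controller entry are illustrative assumptions.
#
#   configs/localtest.yaml:
#       webapp_config: default
#
#   build/webapp/config/controllers_default.yaml:
#       controllers:
#         - active: active
#         - active: disabled
#
# Every controller whose "active" field equals "active" gets a test server on port 6000 + index.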
@atexit.register
def cleanup():
shutdownServers("rgb_server")
if __name__ == "__main__":
setup()
| 26.895522 | 109 | 0.649279 |
4a27610c0609c773eb34ba47872dac473af206b3 | 5,146 | py | Python | sure_tosca-client_python_stubs/test/test_default_api.py | QCDIS/CONF | 6ddb37b691754bbba97c85228d266ac050c4baa4 | [
"Apache-2.0"
] | null | null | null | sure_tosca-client_python_stubs/test/test_default_api.py | QCDIS/CONF | 6ddb37b691754bbba97c85228d266ac050c4baa4 | [
"Apache-2.0"
] | null | null | null | sure_tosca-client_python_stubs/test/test_default_api.py | QCDIS/CONF | 6ddb37b691754bbba97c85228d266ac050c4baa4 | [
"Apache-2.0"
] | 1 | 2021-04-05T09:49:03.000Z | 2021-04-05T09:49:03.000Z | # coding: utf-8
"""
tosca-sure
TOSCA Simple qUeRy sErvice (SURE). # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import unittest
from io import BytesIO
import urllib3
import sure_tosca_client
from sure_tosca_client import Configuration, ApiClient
from sure_tosca_client.api.default_api import DefaultApi # noqa: E501
from sure_tosca_client.rest import ApiException
class TestDefaultApi(unittest.TestCase):
"""DefaultApi unit test stubs"""
def setUp(self):
configuration = Configuration()
configuration.host = 'http://localhost:8081/tosca-sure/1.0.0' #Make sure we don't have '/' on the end of url
if self.service_is_up(configuration.host):
configuration.verify_ssl = False
api_client = ApiClient(configuration=configuration)
self.api = sure_tosca_client.api.default_api.DefaultApi(api_client=api_client) # noqa: E501
self.service_is_up = True
else:
self.service_is_up = False
def tearDown(self):
pass
def test_get_all_ancestor_properties(self):
"""Test case for get_all_ancestor_properties
# noqa: E501
"""
pass
def test_get_all_ancestor_types(self):
"""Test case for get_all_ancestor_types
# noqa: E501
"""
pass
def test_get_ancestors_requirements(self):
"""Test case for get_ancestors_requirements
# noqa: E501
"""
pass
def test_get_dsl_definitions(self):
"""Test case for get_dsl_definitions
# noqa: E501
"""
pass
def test_get_imports(self):
"""Test case for get_imports
# noqa: E501
"""
pass
def test_get_node_outputs(self):
"""Test case for get_node_outputs
# noqa: E501
"""
pass
def test_get_node_properties(self):
"""Test case for get_node_properties
# noqa: E501
"""
pass
def test_get_node_requirements(self):
"""Test case for get_node_requirements
"""
pass
def test_get_node_templates(self):
"""Test case for get_node_templates
"""
if self.service_is_up:
file_id = self.upload_tosca_template('application_example_provisioned.yaml')
node_templates = self.api.get_node_templates(file_id)
self.assertIsNotNone(node_templates)
            nodes_to_deploy = self.api.get_node_templates(file_id, type_name='tosca.nodes.QC.Application')
def test_get_node_type_name(self):
"""Test case for get_node_type_name
# noqa: E501
"""
pass
def test_get_parent_type_name(self):
"""Test case for get_parent_type_name
# noqa: E501
"""
pass
def test_get_related_nodes(self):
"""Test case for get_related_nodes
# noqa: E501
"""
pass
def test_get_relationship_templates(self):
"""Test case for get_relationship_templates
# noqa: E501
"""
pass
def test_get_topology_template(self):
"""Test case for get_topology_template
"""
pass
def test_get_tosca_template(self):
"""Test case for get_tosca_template
"""
pass
def test_get_types(self):
"""Test case for get_types
# noqa: E501
"""
pass
def test_set_node_properties(self):
"""Test case for set_node_properties
# noqa: E501
"""
pass
def test_upload_tosca_template(self):
"""Test case for upload_tosca_template
upload a tosca template description file # noqa: E501
"""
if self.service_is_up:
file_id = self.upload_tosca_template('application_example_provisioned.yaml')
self.assertIsNotNone(file_id)
def get_tosca_file(self, file_name):
tosca_path = "../../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
if not os.path.exists(input_tosca_file_path):
tosca_path = "../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
dir_path = os.path.dirname(os.path.realpath(__file__))
self.assertEqual(True, os.path.exists(input_tosca_file_path),
'Starting from: ' + dir_path + ' Input TOSCA file: ' + input_tosca_file_path + ' not found')
return input_tosca_file_path
def upload_tosca_template(self, file_name):
if self.service_is_up:
file = self.get_tosca_file(file_name)
file_id = self.api.upload_tosca_template(file)
return file_id
def service_is_up(self, url):
        # A HEAD request that completes without raising means the service is reachable.
        try:
            http = urllib3.PoolManager()
            http.request('HEAD', url)
        except Exception:
            return False
        return True
if __name__ == '__main__':
unittest.main()
| 24.859903 | 117 | 0.613875 |
4a2761da4dd15cbe9bc532b45c460627b5e3ae36 | 1,618 | py | Python | gw_bot/lambdas/gw/store/create_api_gw_api_key.py | atykhonov/GW-Bot | fb1d8584f229efe703f05aa0e44fa0924b90ca1d | [
"Apache-2.0"
] | null | null | null | gw_bot/lambdas/gw/store/create_api_gw_api_key.py | atykhonov/GW-Bot | fb1d8584f229efe703f05aa0e44fa0924b90ca1d | [
"Apache-2.0"
] | null | null | null | gw_bot/lambdas/gw/store/create_api_gw_api_key.py | atykhonov/GW-Bot | fb1d8584f229efe703f05aa0e44fa0924b90ca1d | [
"Apache-2.0"
] | null | null | null | import uuid
from gw_bot.helpers.Lambda_Helpers import log_to_elk
from osbot_aws.apis.API_Gateway import API_Gateway
def create_key_in_policy(policy_name, key_name):
api_gateway = API_Gateway()
usage_plan_id = api_gateway.usage_plan_id(policy_name)
new_key = api_gateway.api_key_create(key_name)
key_id = new_key.get('id')
key_value = new_key.get('value')
api_gateway.usage_plan_add_key(usage_plan_id, key_id)
return key_value
def create_key(params):
try:
order_id = params.get('order_id')
product_name = params.get('product_name')
if product_name == 'File Type Detection - 50 (Free)':
return create_key_in_policy('50 Free', f'50_{order_id}')
elif product_name == 'File Type Detection - 1000 files':
return create_key_in_policy('1k month', f'1000_{order_id}')
elif product_name == 'File Type Detection - 50,000 files':
return create_key_in_policy('50k month', f'50k_{order_id}')
elif product_name == 'File Type Detection - 500,000 files':
return create_key_in_policy('500k month', f'500k_{order_id}')
except Exception as error:
log_to_elk('error in create_key', f'{error}', level='error')
return f'{error}'
return uuid.uuid1()
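# A hedged usage sketch of create_key(); the order id below is a placeholder, and the
# product and policy names are the ones hard-coded above:
#
#   params = {"order_id": "<shopify-order-id>", "product_name": "File Type Detection - 1000 files"}
#   api_key = create_key(params)  # creates key "1000_<shopify-order-id>" in the "1k month" usage plan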
def run(event, context):
params = event.get("queryStringParameters",{})
api_key = create_key(params)
log_to_elk('shopify new key request create_api_gw_key', f'{params}')
return {
"isBase64Encoded": False,
"statusCode": 200,
"headers": {},
"body": f'{api_key}'
} | 37.627907 | 73 | 0.662546 |
4a2762d79be63aca2854e3b0444cb625fa1b88ff | 2,603 | py | Python | gym_modular/wrappers/flatten_wrapper.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/wrappers/flatten_wrapper.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | gym_modular/wrappers/flatten_wrapper.py | TimSchneider42/mbpo | 736ba90bbdaddb2a40a6233bc0b78da72235100a | [
"MIT"
] | null | null | null | from typing import Dict, Tuple
import gym
import numpy as np
from .dict_flattener import DictFlattener
class FlattenWrapper(gym.Env):
"""
A wrapper for gym environments that converts gym.spaces.Dict observation and action spaces to gym.spaces.Box.
Needed since stable-baselines3 does not support Dict spaces.
"""
def __init__(self, wrapped_env: gym.Env):
assert isinstance(wrapped_env.action_space, gym.spaces.Dict), "The action space must be of type Dict"
assert isinstance(wrapped_env.observation_space, gym.spaces.Dict), "The observation space must be of type Dict"
self.__wrapped_env = wrapped_env
self.__action_flattener = DictFlattener(self.__wrapped_env.action_space)
self.__observation_flattener = DictFlattener(self.__wrapped_env.observation_space)
def reset(self) -> np.ndarray:
obs_dict = self.__wrapped_env.reset()
obs = self._pack_observation(obs_dict)
return obs
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, Dict]:
action_dict = self._unpack_action(action)
obs_dict, reward, done, info = self.__wrapped_env.step(action_dict)
obs = self._pack_observation(obs_dict)
return obs, reward, done, info
def _pack_observation(self, obs: Dict[str, np.ndarray]) -> np.ndarray:
return self.__observation_flattener.pack_dict(obs)
def _unpack_observation(self, packed_obs: np.ndarray) -> Dict[str, np.ndarray]:
return self.__observation_flattener.unpack_dict(packed_obs)
def _pack_action(self, act: Dict[str, np.ndarray]) -> np.ndarray:
return self.__action_flattener.pack_dict(act)
def _unpack_action(self, packed_act: np.ndarray) -> Dict[str, np.ndarray]:
return self.__action_flattener.unpack_dict(packed_act)
def render(self, mode="human") -> None:
self.__wrapped_env.render(mode)
def close(self) -> None:
self.__wrapped_env.close()
@property
def action_space(self) -> gym.spaces.Space:
return self.__action_flattener.flattened_space
@property
    def observation_space(self) -> gym.spaces.Space:
return self.__observation_flattener.flattened_space
@property
def wrapped_env(self) -> gym.Env:
return self.__wrapped_env
@property
def unwrapped(self):
return self.__wrapped_env.unwrapped
@property
def observation_flattener(self) -> DictFlattener:
return self.__observation_flattener
@property
def action_flattener(self) -> DictFlattener:
return self.__action_flattener
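# A minimal usage sketch, assuming some environment "MyDictEnv" (a hypothetical name,
# not part of this package) whose action and observation spaces are gym.spaces.Dict:
#
#   base_env = MyDictEnv()
#   env = FlattenWrapper(base_env)   # spaces are now flat Box-like gym spaces
#   obs = env.reset()                # np.ndarray instead of a dict
#   obs, reward, done, info = env.step(env.action_space.sample())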
| 34.706667 | 119 | 0.711103 |
4a2762f99d42099aee67604c2d8319f8894a7bd8 | 449 | py | Python | Strings/2087.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Strings/2087.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Strings/2087.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | while True:
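    # For each test case: read N strings; the set is "bad" (Conjunto Ruim) when any
    # string is a prefix of another one, otherwise it is "good" (Conjunto Bom).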
N = int(input())
if N == 0: break
P = [input() for k in range(N)]
ruim = False
l_p = len(P)
for k in range(l_p):
l = len(P[k])
for w in range(l_p):
if k != w and l <= len(P[w]):
if P[k] == P[w][:l]:
ruim = True
break
if ruim: break
if ruim:
print("Conjunto Ruim")
else:
print("Conjunto Bom")
| 22.45 | 41 | 0.405345 |
4a276430d0e74badc9536fb9a55652f79bc19854 | 2,778 | py | Python | dipy/denoise/non_local_means.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | dipy/denoise/non_local_means.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | dipy/denoise/non_local_means.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | null | null | null | from __future__ import division, print_function
import numpy as np
from dipy.denoise.nlmeans_block import nlmeans_block
def non_local_means(arr, sigma, mask=None, patch_radius=1, block_radius=5,
rician=True):
r""" Non-local means for denoising 3D and 4D images, using
blockwise averaging approach
Parameters
----------
arr : 3D or 4D ndarray
The array to be denoised
    mask : 3D ndarray
        Mask selecting the voxels to denoise; if None, the whole volume is used.
sigma : float or 3D array
standard deviation of the noise estimated from the data
patch_radius : int
patch size is ``2 x patch_radius + 1``. Default is 1.
block_radius : int
block size is ``2 x block_radius + 1``. Default is 5.
rician : boolean
If True the noise is estimated as Rician, otherwise Gaussian noise
is assumed.
Returns
-------
denoised_arr : ndarray
the denoised ``arr`` which has the same shape as ``arr``.
References
----------
.. [Coupe08] P. Coupe, P. Yger, S. Prima, P. Hellier, C. Kervrann, C. Barillot,
An Optimized Blockwise Non Local Means Denoising Filter for 3D
Magnetic Resonance Images, IEEE Transactions on Medical Imaging,
27(4):425-441, 2008
.. [Coupe11] Pierrick Coupe, Jose Manjon, Montserrat Robles, Louis Collins.
Adaptive Multiresolution Non-Local Means Filter for 3D MR Image
Denoising IET Image Processing, Institution of Engineering and
Technology, 2011
"""
if mask is None and arr.ndim > 2:
mask = np.ones((arr.shape[0], arr.shape[1], arr.shape[2]), dtype='f8')
else:
mask = np.ascontiguousarray(mask, dtype='f8')
if mask.ndim != 3:
raise ValueError('mask needs to be a 3D ndarray', mask.shape)
if arr.ndim == 3:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
return np.array(nlmeans_block(
np.double(arr),
mask,
patch_radius,
block_radius,
sigma[0, 0, 0],
np.int(rician))).astype(arr.dtype)
elif arr.ndim == 4:
denoised_arr = np.zeros_like(arr)
if isinstance(sigma, np.ndarray) and sigma.ndim == 3:
sigma = (np.ones(arr.shape, dtype=np.float64) *
sigma[..., np.newaxis])
else:
sigma = np.ones(arr.shape, dtype=np.float64) * sigma
for i in range(arr.shape[-1]):
denoised_arr[..., i] = np.array(nlmeans_block(np.double(
arr[..., i]), mask, patch_radius, block_radius,
sigma[0, 0, 0, 0], np.int(rician))).astype(arr.dtype)
return denoised_arr
else:
raise ValueError("Only 3D or 4D array are supported!", arr.shape)
| 34.296296 | 83 | 0.593593 |
4a27643b985835407da85a54bb4ee65b8a29d1fa | 1,994 | py | Python | dota_webscraper/client_interface.py | alexpickering/dota_webscraper | 930c17f5563e6c778a820097517e7bb37e36bfe8 | [
"MIT"
] | null | null | null | dota_webscraper/client_interface.py | alexpickering/dota_webscraper | 930c17f5563e6c778a820097517e7bb37e36bfe8 | [
"MIT"
] | null | null | null | dota_webscraper/client_interface.py | alexpickering/dota_webscraper | 930c17f5563e6c778a820097517e7bb37e36bfe8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Prompts user input, parses into a dictionary of heroes and corresponding levels """
import json
import os
import re
import sys
from argparse import ArgumentParser
from gooey import Gooey, GooeyParser
def import_hero_list():
# import hardcoded hero_list
hero_list = []
with open('hero_list.txt', 'r') as f:
hero_list = [line.rstrip('\n') for line in f]
return hero_list
def format_request(heroes, all_lvl):
# regex separates entries into hero,lvl tuples
reg_obj = re.findall(r'([A-Za-z]+)([^A-Za-z]+)', heroes)
hero_list = import_hero_list()
outdict = {}
for pair in reg_obj:
# if multiple matches, takes first match (alphabetical)
hero = [listhero for listhero in hero_list if pair[0] in listhero.lower()][0]
lvls = []
for elt in pair[1].split(','):
print(elt)
if '-' in elt:
start, end = elt.split('-')
start = int(start.strip())
end = int(end.strip())
lvls += [lvl for lvl in range(start, end+1)]
elif elt.strip():
lvls.append(int(elt.strip()))
outdict[hero] = lvls
outdict['All Heroes'] = all_lvl
# saving dictionary as file
#with open('outdict.json','w+') as f:
# json.dump(outdict,f)
print(outdict)
return outdict
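# A hedged example of the parsing above; resolving "axe" to "Axe" depends on the local
# hero_list.txt, so the exact key is an assumption about that file's contents:
#
#   format_request("axe1-3,5", 25)
#   # -> {"Axe": [1, 2, 3, 5], "All Heroes": 25}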
@Gooey
def set_gooey():
pass
def main():
if len(sys.argv) == 1:
set_gooey()
parser = ArgumentParser()
parser.add_argument('hero', action='extend', type=str, nargs='+', help='Hero and Level')
parser.add_argument('--all', action='store', default=1, type=str, help='Level for All-Heroes Display')
args = parser.parse_args()
heroes = args.hero
all_lvl = args.all
req = format_request(' '.join(heroes), all_lvl)
with open('request.json', 'w') as f:
json.dump(req, f)
#return req
if __name__ == '__main__':
main()
| 24.617284 | 106 | 0.594784 |
4a27644dd7940449724b856c7cc589e4790fba54 | 1,516 | pyde | Python | processing_visualizations/Voronoi_Whirl/Voronoi_Whirl.pyde | COLOR-SKY/RecursiveVoronoi | 24d98298bfcf52a7938557dda491e9d2b3dc9616 | [
"MIT"
] | 3 | 2021-01-25T17:27:35.000Z | 2022-03-22T12:07:16.000Z | processing_visualizations/Voronoi_Whirl/Voronoi_Whirl.pyde | COLOR-SKY/RecursiveVoronoi | 24d98298bfcf52a7938557dda491e9d2b3dc9616 | [
"MIT"
] | null | null | null | processing_visualizations/Voronoi_Whirl/Voronoi_Whirl.pyde | COLOR-SKY/RecursiveVoronoi | 24d98298bfcf52a7938557dda491e9d2b3dc9616 | [
"MIT"
] | null | null | null | """
Author: colorsky
Date: 2020/01/20
"""
import sys
# Replace with your environment dir
sys.path.append('/Python Project/venv_2.7/lib/python2.7/site-packages/')
import requests
def setup():
size(800, 800)
def draw():
background(0)
noFill()
num_points = 10
seed = 999
mask_polygon = [[20, 20], [width - 20, 20],
[width - 20, height - 20], [20, height - 20]]
baseapi = "http://127.0.0.1:5699"
points = requests.post(baseapi + "/RandomPointsInsidePolygon", json={
"polygon": mask_polygon,
"n": 10,
"seed": 10
}).json()
voronoi_regions = requests.post(baseapi + "/ClippedVoronoi", json={
"polygon": mask_polygon,
"points": points
}).json()
interpolations = requests.post(baseapi + "/PolygonsInterpolate", json={
"polygons": voronoi_regions,
"displacement": 1,
"min_area": 5,
"max_iter": 1000
}).json()
for interpolated in interpolations:
for iteration in interpolated:
stroke(map(int(iteration), 0, len(interpolated), 255, 0))
strokeWeight(0.3)
polygon = interpolated[iteration]
p = createShape()
p.beginShape()
for x, y in polygon:
p.vertex(x, y)
p.endShape(CLOSE)
shape(p)
filetitle = "N" + str(num_points) + "S" + str(seed)
# Add text
fill(255)
textSize(12)
textAlign(CENTER)
text(filetitle, width / 2, 15)
noLoop()
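# Response shapes assumed by draw() above, inferred from how the JSON is consumed
# (the local service on port 5699 is not part of this sketch):
#   points:          [[x, y], ...]
#   voronoi_regions: [[[x, y], ...], ...]                   # one clipped polygon per point
#   interpolations:  [{"0": [[x, y], ...], "1": ...}, ...]  # iteration -> polygon, per region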
| 27.071429 | 75 | 0.566623 |
4a2764e86e7ce34f8609ce47fdf27d7a5be482cf | 54,206 | bzl | Python | tensorflow/workspace2.bzl | vadimVoloshanov/tensorflow | 15c4071f8c2422dec79c17e5294969f0fdbd22a3 | [
"Apache-2.0"
] | 1 | 2021-06-17T17:07:40.000Z | 2021-06-17T17:07:40.000Z | tensorflow/workspace2.bzl | vadimVoloshanov/tensorflow | 15c4071f8c2422dec79c17e5294969f0fdbd22a3 | [
"Apache-2.0"
] | 2 | 2021-11-10T20:10:39.000Z | 2022-02-10T05:15:31.000Z | tensorflow/workspace2.bzl | vadimVoloshanov/tensorflow | 15c4071f8c2422dec79c17e5294969f0fdbd22a3 | [
"Apache-2.0"
] | 1 | 2021-04-20T18:26:18.000Z | 2021-04-20T18:26:18.000Z | """TensorFlow workspace initialization. Consult the WORKSPACE on how to use it."""
# Import third party config rules.
load("//tensorflow:version_check.bzl", "check_bazel_version_at_least")
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl", "def_file_filter_configure")
# Import third party repository rules. See go/tfbr-thirdparty.
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/absl:workspace.bzl", absl = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/eigen3:workspace.bzl", eigen3 = "repo")
load("//third_party/farmhash:workspace.bzl", farmhash = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/gemmlowp:workspace.bzl", gemmlowp = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/llvm:workspace.bzl", llvm = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
# Import external repository rules.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load("@tf_runtime//:dependencies.bzl", "tfrt_dependencies")
load("@tf_toolchains//toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
load("@tf_toolchains//toolchains/remote:configure.bzl", "remote_execution_configure")
load("@tf_toolchains//toolchains/clang6:repo.bzl", "clang6_configure")
def _initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
absl()
aws()
clog()
cpuinfo()
dlpack()
eigen3()
farmhash()
flatbuffers()
gemmlowp()
hexagon_nn()
highwayhash()
hwloc()
icu()
jpeg()
kissfft()
nasm()
opencl_headers()
pasta()
psimd()
ruy()
sobol_data()
vulkan_headers()
# Toolchains & platforms required by Tensorflow to build.
def _tf_toolchains():
native.register_execution_platforms("@local_execution_config_platform//:platform")
native.register_toolchains("@local_execution_config_python//:py_toolchain")
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = "//third_party/toolchains/cpus/arm:BUILD",
remote_config_repo_arm = "../arm_compiler",
remote_config_repo_aarch64 = "../aarch64_compiler",
)
    # TFLite crossbuild toolchain for embedded Linux
arm_linux_toolchain_configure(
name = "local_config_embedded_arm",
build_file = "//third_party/toolchains/embedded/arm-linux:BUILD",
aarch64_repo = "../aarch64_linux_toolchain",
armhf_repo = "../armhf_linux_toolchain",
)
# Define all external repositories required by TensorFlow
def _tf_repositories():
"""All external dependencies for TF builds."""
    # To update any of the dependencies below:
# a) update URL and strip_prefix to the new git commit hash
# b) get the sha256 hash of the commit by running:
# curl -L <url> | sha256sum
# and update the sha256 with the result.
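    # For example, to refresh the XNNPACK archive below one would run
    # (checksum output intentionally not reproduced here):
    #    curl -L https://github.com/google/XNNPACK/archive/8f15372eb67ffab0d54cfe3752acaf8f8415af17.zip | sha256sum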
tf_http_archive(
name = "XNNPACK",
sha256 = "5482fb0fcdc1df8b4842f8edf944443ea67ffe712a5cd846f0af484abe4f9a79",
strip_prefix = "XNNPACK-8f15372eb67ffab0d54cfe3752acaf8f8415af17",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/8f15372eb67ffab0d54cfe3752acaf8f8415af17.zip",
"https://github.com/google/XNNPACK/archive/8f15372eb67ffab0d54cfe3752acaf8f8415af17.zip",
],
)
tf_http_archive(
name = "FXdiv",
sha256 = "3d7b0e9c4c658a84376a1086126be02f9b7f753caa95e009d9ac38d11da444db",
strip_prefix = "FXdiv-63058eff77e11aa15bf531df5dd34395ec3017c8",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip",
"https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip",
],
)
tf_http_archive(
name = "pthreadpool",
sha256 = "b96413b10dd8edaa4f6c0a60c6cf5ef55eebeef78164d5d69294c8173457f0ec",
strip_prefix = "pthreadpool-b8374f80e42010941bda6c85b0e3f1a1bd77a1e0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/pthreadpool/archive/b8374f80e42010941bda6c85b0e3f1a1bd77a1e0.zip",
"https://github.com/Maratyszcza/pthreadpool/archive/b8374f80e42010941bda6c85b0e3f1a1bd77a1e0.zip",
],
)
tf_http_archive(
name = "cudnn_frontend_archive",
build_file = "//third_party:cudnn_frontend.BUILD",
patch_file = "//third_party:cudnn_frontend_header_fix.patch",
sha256 = "498f908ced41bbf524af6b89dc4229d5cc89311bfaaed1e3794981e858629196",
strip_prefix = "cudnn-frontend-360d6e7164dfb7c802493fd1c0464f0d815b852a",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVIDIA/cudnn-frontend/archive/360d6e7164dfb7c802493fd1c0464f0d815b852a.zip",
"https://github.com/NVIDIA/cudnn-frontend/archive/360d6e7164dfb7c802493fd1c0464f0d815b852a.zip",
],
)
tf_http_archive(
name = "mkl_dnn",
build_file = "//third_party/mkl_dnn:mkldnn.BUILD",
sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec",
strip_prefix = "oneDNN-0.21.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = "//third_party/mkl_dnn:mkldnn_v1.BUILD",
sha256 = "4d655c0751ee6439584ef5e3d465953fe0c2f4ee2700bc02699bdc1d1572af0d",
strip_prefix = "oneDNN-2.2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v2.2.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v2.2.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_acl_compatible",
build_file = "//third_party/mkl_dnn:mkldnn_acl.BUILD",
sha256 = "4d655c0751ee6439584ef5e3d465953fe0c2f4ee2700bc02699bdc1d1572af0d",
strip_prefix = "oneDNN-2.2",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v2.2.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v2.2.tar.gz",
],
)
tf_http_archive(
name = "compute_library",
sha256 = "cdb3d8a7ab7ea13f0df207a20657f2827ac631c24aa0e8487bacf97697237bdf",
strip_prefix = "ComputeLibrary-21.02",
build_file = "//third_party/compute_library:BUILD",
patch_file = "//third_party/compute_library:compute_library.patch",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/ARM-software/ComputeLibrary/archive/v21.02.tar.gz",
"https://github.com/ARM-software/ComputeLibrary/archive/v21.02.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
# This is the latest `aarch64-none-linux-gnu` compiler provided by ARM
# See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads
# The archive contains GCC version 9.2.1
name = "aarch64_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66",
strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "aarch64_linux_toolchain",
build_file = "//third_party/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD",
sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "armhf_linux_toolchain",
build_file = "//third_party/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD",
sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = "//third_party:libxsmm.BUILD",
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = "//third_party/systemlibs:re2.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_google_crc32c",
sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9",
build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD",
strip_prefix = "crc32c-1.0.6",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/crc32c/archive/1.0.6.tar.gz",
"https://github.com/google/crc32c/archive/1.0.6.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0",
strip_prefix = "google-cloud-cpp-1.17.1",
repo_mapping = {
"@com_github_curl_curl": "@curl",
"@com_github_nlohmann_json": "@nlohmann_json_lib",
},
system_build_file = "//third_party/systemlibs:google_cloud_cpp.BUILD",
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_tensorflow_gcp_tools",
sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542",
strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
"https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
],
)
tf_http_archive(
name = "com_google_googleapis",
build_file = "//third_party/googleapis:googleapis.BUILD",
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
"https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
],
)
tf_http_archive(
name = "png",
build_file = "//third_party:png.BUILD",
patch_file = "//third_party:png_fix_rpi.patch",
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = "//third_party/systemlibs:png.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = "//third_party:sqlite.BUILD",
sha256 = "a629d0b1cc301347109e8ad211ff46af371b6ef73c41b7698e9cf1fb37bf4b95",
strip_prefix = "sqlite-amalgamation-3350300",
system_build_file = "//third_party/systemlibs:sqlite.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/sqlite.org/2021/sqlite-amalgamation-3350300.zip",
"https://sqlite.org/2021/sqlite-amalgamation-3350300.zip",
],
)
tf_http_archive(
name = "gif",
build_file = "//third_party:gif.BUILD",
patch_file = "//third_party:gif_fix_strtok_r.patch",
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = "//third_party/systemlibs:gif.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = "//third_party:six.BUILD",
sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
strip_prefix = "six-1.15.0",
system_build_file = "//third_party/systemlibs:six.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = "//third_party:astor.BUILD",
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = "//third_party/systemlibs:astor.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "astunparse_archive",
build_file = "//third_party:astunparse.BUILD",
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = "//third_party/systemlibs:astunparse.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
"https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
],
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [
"https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
"https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
],
},
)
tf_http_archive(
name = "functools32_archive",
build_file = "//third_party:functools32.BUILD",
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = "//third_party/systemlibs:functools32.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = "//third_party:gast.BUILD",
sha256 = "40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1",
strip_prefix = "gast-0.4.0",
system_build_file = "//third_party/systemlibs:gast.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.4.0.tar.gz",
"https://files.pythonhosted.org/packages/83/4a/07c7e59cef23fb147454663c3271c21da68ba2ab141427c20548ae5a8a4d/gast-0.4.0.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = "//third_party:termcolor.BUILD",
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = "//third_party/systemlibs:termcolor.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "typing_extensions_archive",
build_file = "//third_party:typing_extensions.BUILD",
sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae",
strip_prefix = "typing_extensions-3.7.4.2/src_py3",
system_build_file = "//third_party/systemlibs:typing_extensions.BUILD",
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
"https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
],
)
filegroup_external(
name = "typing_extensions_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": [
"http://mirror.tensorflow.org/raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
"https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
],
},
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = "//third_party:opt_einsum.BUILD",
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = "//third_party/systemlibs:opt_einsum.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
system_build_file = "//third_party/systemlibs:absl_py.BUILD",
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = "//third_party:enum34.BUILD",
system_build_file = "//third_party/systemlibs:enum34.BUILD",
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = "//third_party:backports_weakref.BUILD",
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
tf_http_archive(
name = "dill_archive",
build_file = "//third_party:dill.BUILD",
system_build_file = "//third_party/systemlibs:dill.BUILD",
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
"https://files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
],
sha256 = "6e12da0d8e49c220e8d6e97ee8882002e624f1160289ce85ec2cc0a5246b3a2e",
strip_prefix = "dill-0.3.2",
)
tf_http_archive(
name = "tblib_archive",
build_file = "//third_party:tblib.BUILD",
system_build_file = "//third_party/systemlibs:tblib.BUILD",
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
"https://files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
],
sha256 = "059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c",
strip_prefix = "tblib-1.7.0",
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = "//third_party/protobuf:protobuf.patch",
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = "//third_party/systemlibs:protobuf.BUILD",
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
"https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
],
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = "//third_party/systemlibs:nsync.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = "//third_party:pcre.BUILD",
sha256 = "aecafd4af3bd0f3935721af77b889d9024b2e01d96b58471bd91a3063fb47728",
strip_prefix = "pcre-8.44",
system_build_file = "//third_party/systemlibs:pcre.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
"https://ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = "//third_party:curl.BUILD",
sha256 = "3b4378156ba09e224008e81dcce854b7ce4d182b1f9cfb97fe5ed9e9c18c6bd3",
strip_prefix = "curl-7.76.0",
system_build_file = "//third_party/systemlibs:curl.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.76.0.tar.gz",
"https://curl.haxx.se/download/curl-7.76.0.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = "//third_party/systemlibs:grpc.BUILD",
patch_file = "//third_party/grpc:generate_cc_env_fix.patch",
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl": "bazel/grpc_extra_deps.bzl",
"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl": "bazel/cc_grpc_library.bzl",
"//third_party/systemlibs:grpc.bazel.generate_cc.bzl": "bazel/generate_cc.bzl",
"//third_party/systemlibs:grpc.bazel.protobuf.bzl": "bazel/protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = "//third_party:linenoise.BUILD",
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
llvm("llvm-project")
# Intel openMP that is part of LLVM sources.
tf_http_archive(
name = "llvm_openmp",
build_file = "//third_party/llvm_openmp:BUILD",
sha256 = "d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44",
strip_prefix = "openmp-10.0.1.src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
],
)
tf_http_archive(
name = "lmdb",
build_file = "//third_party:lmdb.BUILD",
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = "//third_party/systemlibs:lmdb.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = "//third_party:jsoncpp.BUILD",
sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0",
strip_prefix = "jsoncpp-1.9.2",
system_build_file = "//third_party/systemlibs:jsoncpp.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = "//third_party/systemlibs:boringssl.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
"https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
],
)
tf_http_archive(
name = "zlib",
build_file = "//third_party:zlib.BUILD",
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = "//third_party/systemlibs:zlib.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = "//third_party/fft2d:fft2d.BUILD",
sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb",
strip_prefix = "OouraFFT-1.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
"https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
],
)
tf_http_archive(
name = "snappy",
build_file = "//third_party:snappy.BUILD",
sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f",
strip_prefix = "snappy-1.1.8",
system_build_file = "//third_party/systemlibs:snappy.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.8.tar.gz",
"https://github.com/google/snappy/archive/1.1.8.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = "//third_party:nccl/archive.BUILD",
patch_file = "//third_party/nccl:archive.patch",
sha256 = "3ae89ddb2956fff081e406a94ff54ae5e52359f5d645ce977c7eba09b3b782e6",
strip_prefix = "nccl-2.8.3-1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz",
"https://github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = "//third_party:pprof.BUILD",
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
# The CUDA 11 toolkit ships with CUB. We should be able to delete this rule
# once TF drops support for CUDA 10.
tf_http_archive(
name = "cub_archive",
build_file = "//third_party:cub.BUILD",
sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda",
strip_prefix = "cub-1.9.9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.9.9.zip",
"https://github.com/NVlabs/cub/archive/1.9.9.zip",
],
)
tf_http_archive(
name = "cython",
build_file = "//third_party:cython.BUILD",
sha256 = "e2e38e1f0572ca54d6085df3dec8b607d20e81515fb80215aed19c81e8fe2079",
strip_prefix = "cython-0.29.21",
system_build_file = "//third_party/systemlibs:cython.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.29.21.tar.gz",
"https://github.com/cython/cython/archive/0.29.21.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = "//third_party:arm_neon_2_x86_sse.BUILD",
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = "//third_party:double_conversion.BUILD",
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = "//third_party/systemlibs:double_conversion.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = "//third_party:tflite_mobilenet_float.BUILD",
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = "//third_party:tflite_mobilenet_quant.BUILD",
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = "//third_party:tflite_ovic_testdata.BUILD",
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_python",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
"https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
# Apple and Swift rules.
# https://github.com/bazelbuild/rules_apple/releases
tf_http_archive(
name = "build_bazel_rules_apple",
sha256 = "ee9e6073aeb5a65c100cb9c44b0017c937706a4ae03176e14a7e78620a198079",
strip_prefix = "rules_apple-5131f3d46794bf227d296c82f30c2499c9de3c5b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
"https://github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
],
)
# https://github.com/bazelbuild/rules_swift/releases
tf_http_archive(
name = "build_bazel_rules_swift",
sha256 = "d0833bc6dad817a367936a5f902a0c11318160b5e80a20ece35fb85a5675c886",
strip_prefix = "rules_swift-3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
"https://github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
],
)
# https://github.com/bazelbuild/apple_support/releases
tf_http_archive(
name = "build_bazel_apple_support",
sha256 = "ad8ae80e93612b8151019367a3d1604d7a51c14480dae1254e10252007e8260c",
strip_prefix = "apple_support-501b4afb27745c4813a88ffa28acd901408014e4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
"https://github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
],
)
# https://github.com/bazelbuild/bazel-skylib/releases
tf_http_archive(
name = "bazel_skylib",
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
],
)
# https://github.com/apple/swift-protobuf/releases
tf_http_archive(
name = "com_github_apple_swift_swift_protobuf",
strip_prefix = "swift-protobuf-1.6.0/",
sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip",
"https://github.com/apple/swift-protobuf/archive/1.6.0.zip",
],
)
# https://github.com/google/xctestrunner/releases
http_file(
name = "xctestrunner",
executable = 1,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
"https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = "//third_party:nlohmann_json.BUILD",
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
],
sha256 = "90b705137b69ee3b5fc655eaca66d0dc9862ea1759226f7ccd3098425ae69571",
strip_prefix = "pybind11-2.6.0",
build_file = "//third_party:pybind11.BUILD",
system_build_file = "//third_party/systemlibs:pybind11.BUILD",
)
tf_http_archive(
name = "wrapt",
build_file = "//third_party:wrapt.BUILD",
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = "//third_party/systemlibs:wrapt.BUILD",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
tf_http_archive(
name = "coremltools",
sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
strip_prefix = "coremltools-3.3",
build_file = "//third_party:coremltools.BUILD",
urls = [
"http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip",
"https://github.com/apple/coremltools/archive/3.3.zip",
],
)
def workspace():
# Check the bazel version before executing any repository rules, in case
# those rules rely on the version we require here.
check_bazel_version_at_least("1.0.0")
# Initialize toolchains and platforms.
_tf_toolchains()
# Import third party repositories according to go/tfbr-thirdparty.
_initialize_third_party()
# Import all other repositories. This should happen before initializing
# any external repositories, because those come with their own
# dependencies. Those recursive dependencies will only be imported if they
# don't already exist (at least if the external repository macros were
# written according to common practice to query native.existing_rule()).
_tf_repositories()
tfrt_dependencies()
# Alias so it can be loaded without assigning to a different symbol to prevent
# shadowing previous loads and trigger a buildifier warning.
tf_workspace2 = workspace
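# Illustrative usage sketch (not part of the original file): a consuming WORKSPACE
# would typically load and invoke this macro roughly as follows; the exact load label
# below is an assumption for the example.
#
#   load("@org_tensorflow//tensorflow:workspace2.bzl", "tf_workspace2")
#   tf_workspace2()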
4a27656913c0f9fe243e4673f96884d10712e2c3 | 3,964 | py | Python | data/process_data.py | dariusgm/disasterresponse | cd7b8f93ff769f0581cf7c541a1f85af7543d844 | [
"MIT"
] | null | null | null | data/process_data.py | dariusgm/disasterresponse | cd7b8f93ff769f0581cf7c541a1f85af7543d844 | [
"MIT"
] | null | null | null | data/process_data.py | dariusgm/disasterresponse | cd7b8f93ff769f0581cf7c541a1f85af7543d844 | [
"MIT"
] | null | null | null | import sys
import pandas as pd
import os
from sqlalchemy import create_engine
class ETLPipeline:
def __init__(self, messages_path: str, categories_path: str, sql_path: str):
self.messages_path = messages_path
self.categories_path = categories_path
self.sql_path = sql_path
def __transform_categories(self, df: pd.DataFrame) -> pd.DataFrame:
'''
Split the semicolon-separated 'categories' column into one column per category.
:param df: (pd.DataFrame) data with a 'categories' column
:returns: (pd.DataFrame) categories column exploded to multiple 0/1 columns
'''
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(expand=True, pat=";")
# select the first row of the categories dataframe
row = categories.iloc[0]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = row.apply(lambda x: x.split("-")[0])
# rename the columns of `categories`
categories.columns = category_colnames
# Convert category values to just numbers 0 or 1
for column in categories:
# set each value to be the last character of the string
# convert column from string to numeric
categories[column] = categories[column].astype(str).apply(lambda x: int(x[-1]))
return categories
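# Illustrative example (not from the original file): a raw cell such as
# "related-1;request-0;offer-0" is split on ";" into "related-1", "request-0" and
# "offer-0"; the part before "-" becomes the column name and the trailing digit
# becomes that row's 0/1 value, so the row ends up as related=1, request=0, offer=0.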
def __extract(self):
'''
Return raw df for messages and categories
:returns: (list(pd.DataFrame)) [messages_df, categories_df]
'''
return [
pd.read_csv(self.messages_path),
pd.read_csv(self.categories_path)
]
def __transform(self, categories: pd.DataFrame, messages: pd.DataFrame) -> pd.DataFrame:
'''
transform categories_df and messages_df.
Apply required transformations on the data.
:param categories: (pd.DataFrame) data with categories
:param messages: (pd.DataFrame) data with messages
:returns: merged df
'''
df = pd.concat([categories, messages], axis=1)
categories = self.__transform_categories(df)
df = df.drop(columns='categories')
return pd.concat([df, categories], axis=1)
def __load(self, df: pd.DataFrame) -> None:
'''
Dump the data without duplicates to a SQLite database
:param df: (pd.DataFrame) entire dataframe
:returns: None
'''
# check number of duplicates
print("length with duplicates: {}".format(len(df)))
# drop duplicates
df = df.drop_duplicates()
# check number of duplicates
print("length without duplicates: {}".format(len(df)))
engine = create_engine(f'sqlite:///{self.sql_path}')
df.to_sql('etl', engine, index=False)
def run(self):
'''
trigger entire ETL process
:returns: None
'''
messages_df, categories_df = self.__extract()
df = self.__transform(messages_df, categories_df)
self.__load(df)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
etl_pipeline = ETLPipeline(messages_filepath, categories_filepath, database_filepath)
etl_pipeline.run()
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
4a276779e36901e75eeac835583a5f142b2b1684 | 529 | py | Python | venv/Exercises/ex062.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | venv/Exercises/ex062.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | venv/Exercises/ex062.py | jonassignoreti/Python-CursoemVideo | 24f5932bed2fe98308321be7dd9326e65a942d4b | [
"MIT"
] | null | null | null | '''Improve DESAFIO 61 (exercise 61) by asking the user whether they want to show a few more terms.
The program ends when the user says they want to show 0 more terms.'''
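# Illustrative sample run (hypothetical input): with first term 2 and ratio 3 the
# program prints 2 > 5 > 8 > 11 > 14 > 17 > 20 > 23 > 26 > 29 > and then asks how
# many more terms to show; answering 0 prints END and stops.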
print('{:=^100}'.format(' Os 10 PRIMEIROS TERMOS DE UMA PA '))
p = int(input('Primeiro Termo: '))
r = int(input('Razão: '))
cont = 1
meta = 10
while cont != (meta + 1):
termo = p + (cont - 1) * r
print('{}'.format(termo), end=' > ')
cont += 1
if cont == (meta + 1):
meta = int(input('\nQuantos termos você deseja mais ver: ')) + meta
print('END')
4a2767ee85dc1db1e563b89771c652a5a38621c1 | 4,785 | py | Python | test/functional/wallet_importprunedfunds.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | test/functional/wallet_importprunedfunds.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | test/functional/wallet_importprunedfunds.py | btc20/bitcoin2.0 | e7a54b00ebd0e8d20f4d5315cc9a21c77ded25cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.bitcoin2config import *
class ImportPrunedFundsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(COINBASE_MATURITY+1)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
# Check only one address
address_info = self.nodes[0].getaddressinfo(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1+COINBASE_MATURITY)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
# Import with no affiliated address
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance()
assert_equal(balance1, Decimal(0))
# Import with affiliated address with no rescan
self.nodes[1].importaddress(address=address2, rescan=False)
self.nodes[1].importprunedfunds(rawtransaction=rawtxn2, txoutproof=proof2)
assert [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
# Import with private key with no rescan
self.nodes[1].importprivkey(privkey=address3_privkey, rescan=False)
self.nodes[1].importprunedfunds(rawtxn3, proof3)
assert [tx for tx in self.nodes[1].listtransactions() if tx['txid'] == txnid3]
balance3 = self.nodes[1].getbalance()
assert_equal(balance3, Decimal('0.025'))
# Addresses Test - after import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
# Remove transactions
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid1]
self.nodes[1].removeprunedfunds(txnid2)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
self.nodes[1].removeprunedfunds(txnid3)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid3]
if __name__ == '__main__':
ImportPrunedFundsTest().main()
4a2769d33c0836f6b6cbb7ac9cb3d013cb356e56 | 11,431 | py | Python | tests/mdts/tests/functional_tests/test_nat_router.py | yantarou/midonet | 11fea2549416588a9170f6ce872790ad907d2a4b | [
"Apache-2.0"
] | 2 | 2018-08-29T01:36:17.000Z | 2018-11-22T16:57:25.000Z | tests/mdts/tests/functional_tests/test_nat_router.py | yantarou/midonet | 11fea2549416588a9170f6ce872790ad907d2a4b | [
"Apache-2.0"
] | 8 | 2018-05-24T13:36:03.000Z | 2021-02-19T16:01:43.000Z | tests/mdts/tests/functional_tests/test_nat_router.py | yantarou/midonet | 11fea2549416588a9170f6ce872790ad907d2a4b | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.plugins.attrib import attr
from mdts.lib.binding_manager import BindingManager
from mdts.lib.physical_topology_manager import PhysicalTopologyManager
from mdts.lib.virtual_topology_manager import VirtualTopologyManager
from mdts.tests.utils.asserts import async_assert_that
from mdts.tests.utils.asserts import receives
from mdts.tests.utils.asserts import receives_icmp_unreachable_for_udp
from mdts.tests.utils.asserts import should_NOT_receive
from mdts.tests.utils.asserts import within_sec
from mdts.tests.utils.utils import bindings
from mdts.tests.utils.utils import wait_on_futures
import logging
import time
LOG = logging.getLogger(__name__)
PTM = PhysicalTopologyManager('../topologies/mmm_physical_test_nat_router.yaml')
VTM = VirtualTopologyManager('../topologies/mmm_virtual_test_nat_router.yaml')
BM = BindingManager(PTM, VTM)
binding_multihost = {
'description': 'spanning across multiple MMs',
'bindings': [
{'binding':
{'device_name': 'bridge-000-001', 'port_id': 2,
'host_id': 1, 'interface_id': 1}},
{'binding':
{'device_name': 'bridge-000-002', 'port_id': 2,
'host_id': 2, 'interface_id': 2}},
]
}
def set_filters(router_name, inbound_filter_name, outbound_filter_name):
"""Sets in-/out-bound filters to a router."""
router = VTM.get_router(router_name)
inbound_filter = None
if inbound_filter_name:
inbound_filter = VTM.get_chain(inbound_filter_name)
outbound_filter = None
if outbound_filter_name:
outbound_filter = VTM.get_chain(outbound_filter_name)
router.set_inbound_filter(inbound_filter)
router.set_outbound_filter(outbound_filter)
# Sleep here to make sure that the settings have been propagated.
time.sleep(5)
def unset_filters(router_name):
"""Unsets in-/out-bound filters from a router."""
set_filters(router_name, None, None)
def feed_receiver_mac(receiver):
"""Feeds the receiver's mac address to the MidoNet router."""
try:
router_port = VTM.get_router('router-000-001').get_port(2)
router_ip = router_port.get_mn_resource().get_port_address()
receiver_ip = receiver.get_ip()
f1 = async_assert_that(receiver, receives('dst host %s' % receiver_ip,
within_sec(10)))
receiver.send_arp_request(router_ip)
wait_on_futures([f1])
except:
LOG.warn('Oops, sending ARP from the receiver VM failed.')
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_dnat():
"""
Title: Tests DNAT on ping messages.
Scenario 1:
When: a VM sends ICMP echo request with ping command to an unassigned IP
address.
Then: the router performs DNAT on the message according to the rule chain
set to the router,
And: the receiver VM should receive the ICMP echo packet,
And: the ping command succeeds
"""
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-002', 2)
# Reset in-/out-bound filters.
unset_filters('router-000-001')
feed_receiver_mac(receiver)
f2 = async_assert_that(receiver, should_NOT_receive('dst host 172.16.2.1 and icmp',
within_sec(5)))
f1 = sender.ping_ipv4_addr('100.100.100.100')
wait_on_futures([f1, f2])
# Set DNAT rule chains to the router
set_filters('router-000-001', 'pre_filter_001', 'post_filter_001')
f2 = async_assert_that(receiver, receives('dst host 172.16.2.1 and icmp',
within_sec(5)))
f3 = async_assert_that(sender, receives('src host 100.100.100.100 and icmp',
within_sec(5)))
f1 = sender.ping_ipv4_addr('100.100.100.100')
wait_on_futures([f1, f2, f3])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_dnat_for_udp():
"""
Title: Tests DNAT on UDP packets.
Scenario:
When: a VM sends UDP packets to an unassigned IP address.
Then: the router performs DNAT on the message according to the rule chain
set to the router,
And: the UDP packets reach the receiver VM.
And: because the UDP port is not open, the receiver VM returns ICMP error
responses.
"""
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-002', 2)
# Reset in-/out-bound filters.
unset_filters('router-000-001')
feed_receiver_mac(receiver)
# Target hardware is a router's incoming port.
router_port = VTM.get_router('router-000-001').get_port(1)
router_mac = router_port.get_mn_resource().get_port_mac()
f2 = async_assert_that(receiver,
should_NOT_receive('dst host 172.16.2.1 and udp',
within_sec(5)))
f1 = sender.send_udp(router_mac, '100.100.100.100', 29,
src_port=9, dst_port=9)
wait_on_futures([f1, f2])
# Set DNAT rule chains to the router
set_filters('router-000-001', 'pre_filter_001', 'post_filter_001')
f1 = async_assert_that(receiver,
receives('dst host 172.16.2.1 and udp',
within_sec(5)))
# Sender should receive ICMP unreachable as the receiver port is not open.
f2 = async_assert_that(sender,
receives_icmp_unreachable_for_udp(
'172.16.1.1', '100.100.100.100',
udp_src_port=9, udp_dst_port=9,
timeout=within_sec(10)))
f3 = sender.send_udp(router_mac, '100.100.100.100', 29,
src_port=9, dst_port=9)
wait_on_futures([f1, f2, f3])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_snat():
"""
Title: Tests SNAT on ping messages.
Scenario:
When: a VM sends ICMP echo request with ping command to a different subnet,
Then: the router performs SNAT on the message according to the rule chain
set to the router,
And: the receiver VM should receive the ICMP echo packet, with src address
NATted,
And: the ping command succeeds.
"""
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-002', 2)
# Reset in-/out-bound filters.
unset_filters('router-000-001')
feed_receiver_mac(receiver)
# No SNAT configured. Should not receive SNATed messages.
f2 = async_assert_that(receiver, should_NOT_receive('src host 172.16.1.100 and icmp',
within_sec(5)))
f1 = sender.ping4(receiver)
wait_on_futures([f1, f2])
# Set SNAT rule chains to the router
set_filters('router-000-001', 'pre_filter_002', 'post_filter_002')
# The receiver should receive SNATed messages.
f2 = async_assert_that(receiver, receives('src host 172.16.1.100 and icmp',
within_sec(5)))
f3 = async_assert_that(sender, receives('dst host 172.16.1.1 and icmp',
within_sec(5)))
f1 = sender.ping4(receiver)
wait_on_futures([f1, f2, f3])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_snat_for_udp():
"""
Title: Tests SNAT on UDP packets.
Scenario:
When: a VM sends UDP packets to an unassigned IP address.
Then: the router performs SNAT on the message according to the rule chain
set to the router,
And: the UDP packets reach the receiver VM, with src address NATted,
And: because the UDP port is not open, the receiver VM returns ICMP error
responses.
"""
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-002', 2)
# Reset in-/out-bound filters.
unset_filters('router-000-001')
feed_receiver_mac(receiver)
# Target hardware is a router's incoming port.
router_port = VTM.get_router('router-000-001').get_port(1)
router_mac = router_port.get_mn_resource().get_port_mac()
# No SNAT configured. Should not receive SNATed messages.
f2 = async_assert_that(receiver, should_NOT_receive('src host 172.16.1.100 and udp',
within_sec(5)))
f1 = sender.send_udp(router_mac, '172.16.2.1', 29,
src_port=9, dst_port=65000)
wait_on_futures([f1, f2])
# Set SNAT rule chains to the router
set_filters('router-000-001', 'pre_filter_002', 'post_filter_002')
# The receiver should receive SNATed messages.
f2 = async_assert_that(receiver, receives('src host 172.16.1.100 and udp',
within_sec(5)))
# Sender should receive ICMP unreachable as the receiver port is not open.
f3 = async_assert_that(sender, receives_icmp_unreachable_for_udp(
'172.16.1.1', '172.16.2.1',
udp_src_port=9, udp_dst_port=65000,
timeout=within_sec(5)))
f1 = sender.send_udp(router_mac, '172.16.2.1', 29,
src_port=9, dst_port=65000)
wait_on_futures([f1, f2, f3])
@attr(version="v1.2.0")
@bindings(binding_multihost)
def test_floating_ip():
"""
Title: Tests a floating IP.
Scenario 1:
When: a VM sends an ICMP echo request to a floating IP address
(100.100.100.100).
Then: the router performs DNAT on the message according to the rule chain
set to the router,
And: the receiver VM should receive the ICMP echo packet,
And: the receiver sends back an ICMP reply with its original IP address
as a source address.
And: the router applies SNAT to the reply packet.
And: the sender receives the reply with src address NATed to the floating IP
address.
"""
sender = BM.get_iface_for_port('bridge-000-001', 2)
receiver = BM.get_iface_for_port('bridge-000-002', 2)
# Reset in-/out-bound filters.
unset_filters('router-000-001')
feed_receiver_mac(receiver)
f1 = async_assert_that(receiver, should_NOT_receive('dst host 172.16.2.1 and icmp',
within_sec(10)))
sender.ping_ipv4_addr('100.100.100.100')
wait_on_futures([f1])
# Configure floating IP address with the router
set_filters('router-000-001', 'pre_filter_floating_ip',
'post_filter_floating_ip')
f1 = async_assert_that(receiver, receives('dst host 172.16.2.1 and icmp',
within_sec(10)))
f2 = async_assert_that(sender, receives('src host 100.100.100.100 and icmp',
within_sec(10)))
sender.ping_ipv4_addr('100.100.100.100')
wait_on_futures([f1, f2])
4a276b388a829dc6aaa6cd8623f0bc929eed7ea1 | 85 | py | Python | assignment1/admin.py | patilpriyankaco/assignment-django-1 | 577ab25c3d698132b082e107dffd25d0a1a50305 | [
"MIT"
] | null | null | null | assignment1/admin.py | patilpriyankaco/assignment-django-1 | 577ab25c3d698132b082e107dffd25d0a1a50305 | [
"MIT"
] | null | null | null | assignment1/admin.py | patilpriyankaco/assignment-django-1 | 577ab25c3d698132b082e107dffd25d0a1a50305 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Quote
admin.site.register(Quote)
4a276c9f3a42863221612d9bd649385f40a3e39e | 3,090 | py | Python | managefunds/settings.py | chiraag-kakar/managefunds | bf768ef90bde06d1b7877b14765d48e858d971c8 | [
"MIT"
] | 1 | 2022-01-24T13:49:03.000Z | 2022-01-24T13:49:03.000Z | managefunds/settings.py | chiraag-kakar/managefunds | bf768ef90bde06d1b7877b14765d48e858d971c8 | [
"MIT"
] | null | null | null | managefunds/settings.py | chiraag-kakar/managefunds | bf768ef90bde06d1b7877b14765d48e858d971c8 | [
"MIT"
] | null | null | null | """
Django settings for managefunds project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4lilh&gbj8twfa-r0#r1^yyx^15c%26ukcujso=c-@%l8u4k%*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
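# Illustrative hardening sketch (not part of the original project): these values
# could be read from the environment instead of being hard-coded, e.g.
#   import os
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '0') == '1'
# The variable names DJANGO_SECRET_KEY and DJANGO_DEBUG are assumptions for the example.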
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'managefunds.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'managefunds.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
4a276db1d093593755987b35ccb0eb1e337a0052 | 26,050 | py | Python | parsl/providers/aws/aws.py | gerrick/parsl | 820c43d2e893a016139692a23cd93f99cde5cc8d | [
"Apache-2.0"
] | 1 | 2020-11-21T17:32:09.000Z | 2020-11-21T17:32:09.000Z | parsl/providers/aws/aws.py | raffmont/parsl | 2f843b903a9008d230d42734db48317a40a2ad8e | [
"Apache-2.0"
] | null | null | null | parsl/providers/aws/aws.py | raffmont/parsl | 2f843b903a9008d230d42734db48317a40a2ad8e | [
"Apache-2.0"
] | 1 | 2022-03-09T10:51:12.000Z | 2022-03-09T10:51:12.000Z | import json
import logging
import os
import time
from string import Template
from parsl.dataflow.error import ConfigurationError
from parsl.providers.aws.template import template_string
from parsl.providers.provider_base import ExecutionProvider, JobState, JobStatus
from parsl.providers.error import OptionalModuleMissing
from parsl.utils import RepresentationMixin
from parsl.launchers import SingleNodeLauncher
logger = logging.getLogger(__name__)
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
_boto_enabled = False
else:
_boto_enabled = True
translate_table = {
'pending': JobState.PENDING,
'running': JobState.RUNNING,
'terminated': JobState.COMPLETED,
'shutting-down': JobState.COMPLETED, # (configuring),
'stopping': JobState.COMPLETED, # We shouldn't really see this state
'stopped': JobState.COMPLETED, # We shouldn't really see this state
}
class AWSProvider(ExecutionProvider, RepresentationMixin):
"""A provider for using Amazon Elastic Compute Cloud (EC2) resources.
One of three methods is required to authenticate: key_file, profile, or environment
variables. If neither key_file nor profile is set, the following environment
variables must be set: ``AWS_ACCESS_KEY_ID`` (the access key for your AWS account),
``AWS_SECRET_ACCESS_KEY`` (the secret key for your AWS account), and (optionally) the
``AWS_SESSION_TOKEN`` (the session key for your AWS account).
Parameters
----------
image_id : str
Identification of the Amazon Machine Image (AMI).
worker_init : str
String to append to the Userdata script executed in the cloudinit phase of
instance initialization.
key_file : str
Path to json file that contains 'AWSAccessKeyId' and 'AWSSecretKey'.
nodes_per_block : int
Nodes to provision per block. Default is 1 (this is effectively always 1 for EC2).
profile : str
Profile to be used from the standard aws config file ~/.aws/config.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain. Default is 0.
max_blocks : int
Maximum number of blocks to maintain. Default is 10.
instance_type : str
EC2 instance type. Instance types comprise varying combinations of CPU, memory,
storage, and networking capacity. For more information on possible instance types,
see `here <https://aws.amazon.com/ec2/instance-types/>`_. Default is 't2.small'.
region : str
Amazon Web Service (AWS) region to launch machines. Default is 'us-east-2'.
key_name : str
Name of the AWS private key (.pem file) that is usually generated on the console
to allow SSH access to the EC2 instances. This is mostly used for debugging.
spot_max_bid : float
Maximum bid price (if requesting spot market machines).
iam_instance_profile_arn : str
Launch instance with a specific role.
state_file : str
Path to the state file from a previous run to re-use.
walltime : str
Walltime requested per block in HH:MM:SS. This option is not currently honored by this provider.
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~parsl.launchers.SingleNodeLauncher` (the default),
:class:`~parsl.launchers.SrunLauncher`, or
:class:`~parsl.launchers.AprunLauncher`
linger : Bool
When set to True, the workers will not ``halt``. The user is responsible for shutting
down the node.
"""
def __init__(self,
image_id,
key_name,
init_blocks=1,
min_blocks=0,
max_blocks=10,
nodes_per_block=1,
parallelism=1,
worker_init='',
instance_type='t2.small',
region='us-east-2',
spot_max_bid=0,
key_file=None,
profile=None,
iam_instance_profile_arn='',
state_file=None,
walltime="01:00:00",
linger=False,
launcher=SingleNodeLauncher()):
if not _boto_enabled:
raise OptionalModuleMissing(['boto3'], "AWS Provider requires the boto3 module.")
self.image_id = image_id
self._label = 'ec2'
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.nodes_per_block = nodes_per_block
self.max_nodes = max_blocks * nodes_per_block
self.parallelism = parallelism
self.worker_init = worker_init
self.instance_type = instance_type
self.region = region
self.spot_max_bid = spot_max_bid
self.key_name = key_name
self.key_file = key_file
self.profile = profile
self.iam_instance_profile_arn = iam_instance_profile_arn
self.walltime = walltime
self.launcher = launcher
self.linger = linger
self.resources = {}
self.state_file = state_file if state_file is not None else '.ec2_{}.json'.format(self.label)
env_specified = os.getenv("AWS_ACCESS_KEY_ID") is not None and os.getenv("AWS_SECRET_ACCESS_KEY") is not None
if profile is None and key_file is None and not env_specified:
raise ConfigurationError("Must specify either profile', 'key_file', or "
"'AWS_ACCESS_KEY_ID' and 'AWS_SECRET_ACCESS_KEY' environment variables.")
try:
self.initialize_boto_client()
except Exception as e:
logger.error("{} failed to initialize.".format(self))
raise e
state_file_exists = False
try:
self.read_state_file(self.state_file)
state_file_exists = True
except Exception:
logger.info("No state file found. Cannot load previous options. Creating new infrastructure.")
if not state_file_exists:
try:
self.create_vpc().id
except Exception as e:
logger.info("Failed to create ec2 infrastructure: {0}".format(e))
raise
else:
self.write_state_file()
def initialize_boto_client(self):
"""Initialize the boto client."""
self.session = self.create_session()
self.client = self.session.client('ec2')
self.ec2 = self.session.resource('ec2')
self.instances = []
self.instance_states = {}
self.vpc_id = 0
self.sg_id = 0
self.sn_ids = []
def read_state_file(self, state_file):
"""Read the state file, if it exists.
If this script has been run previously, resource IDs will have been written to a
state file. On starting a run, a state file will be looked for before creating new
infrastructure. Information on VPCs, security groups, and subnets are saved, as
well as running instances and their states.
AWS has a maximum number of VPCs per region per account, so we do not want to
clutter users' AWS accounts with security groups and VPCs that will be used only
once.
"""
try:
fh = open(state_file, 'r')
state = json.load(fh)
self.vpc_id = state['vpcID']
self.sg_id = state['sgID']
self.sn_ids = state['snIDs']
self.instances = state['instances']
except Exception as e:
logger.debug("Caught exception while reading state file: {0}".format(e))
raise e
logger.debug("Done reading state from the local state file.")
def write_state_file(self):
"""Save information that must persist to a file.
We do not want to create a new VPC and new identical security groups, so we save
information about them in a file between runs.
"""
fh = open('awsproviderstate.json', 'w')
state = {}
state['vpcID'] = self.vpc_id
state['sgID'] = self.sg_id
state['snIDs'] = self.sn_ids
state['instances'] = self.instances
state["instanceState"] = self.instance_states
fh.write(json.dumps(state, indent=4))
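# For reference, the JSON written above has the shape:
#   {"vpcID": ..., "sgID": ..., "snIDs": [...], "instances": [...], "instanceState": {...}}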
def create_session(self):
"""Create a session.
First we look in self.key_file for a path to a json file with the
credentials. The key file should have 'AWSAccessKeyId' and 'AWSSecretKey'.
Next we look at self.profile for a profile name and try
to use the Session call to automatically pick up the keys for the profile from
the user default keys file ~/.aws/config.
Finally, boto3 will look for the keys in environment variables:
AWS_ACCESS_KEY_ID: The access key for your AWS account.
AWS_SECRET_ACCESS_KEY: The secret key for your AWS account.
AWS_SESSION_TOKEN: The session key for your AWS account.
This is only needed when you are using temporary credentials.
The AWS_SECURITY_TOKEN environment variable can also be used,
but is only supported for backwards compatibility purposes.
AWS_SESSION_TOKEN is supported by multiple AWS SDKs besides python.
"""
session = None
if self.key_file is not None:
credfile = os.path.expandvars(os.path.expanduser(self.key_file))
try:
with open(credfile, 'r') as f:
creds = json.load(f)
except json.JSONDecodeError as e:
logger.error(
"EC2Provider '{}': json decode error in credential file {}".format(self.label, credfile)
)
raise e
except Exception as e:
logger.debug(
"EC2Provider '{0}' caught exception while reading credential file: {1}".format(
self.label, credfile
)
)
raise e
logger.debug("EC2Provider '{}': Using credential file to create session".format(self.label))
session = boto3.session.Session(region_name=self.region, **creds)
elif self.profile is not None:
logger.debug("EC2Provider '{}': Using profile name to create session".format(self.label))
session = boto3.session.Session(
profile_name=self.profile, region_name=self.region
)
else:
logger.debug("EC2Provider '{}': Using environment variables to create session".format(self.label))
session = boto3.session.Session(region_name=self.region)
return session
def create_vpc(self):
"""Create and configure VPC
We create a VPC with CIDR 10.0.0.0/16, which provides up to 64,000 instances.
We attach a subnet for each availability zone within the region specified in the
config. We give each subnet an ip range like 10.0.X.0/20, which is large enough
for approx. 4000 instances.
Security groups are configured in function security_group.
"""
try:
# We use a large VPC so that the cluster can get large
vpc = self.ec2.create_vpc(
CidrBlock='10.0.0.0/16',
AmazonProvidedIpv6CidrBlock=False,
)
except Exception as e:
# This failure will cause a full abort
logger.error("{}\n".format(e))
raise e
# Attach internet gateway so that our cluster can
# talk to the outside internet
internet_gateway = self.ec2.create_internet_gateway()
internet_gateway.attach_to_vpc(VpcId=vpc.vpc_id) # Returns None
self.internet_gateway = internet_gateway.id
# Create and configure route table to allow proper traffic
route_table = self.config_route_table(vpc, internet_gateway)
self.route_table = route_table.id
# Get all availability zones
availability_zones = self.client.describe_availability_zones()
# Go through the AZs and set up a subnet in each one
for num, zone in enumerate(availability_zones['AvailabilityZones']):
if zone['State'] == "available":
# Create a large subnet (4000 max nodes)
subnet = vpc.create_subnet(
CidrBlock='10.0.{}.0/20'.format(16 * num), AvailabilityZone=zone['ZoneName']
)
# Make subnet accessible
subnet.meta.client.modify_subnet_attribute(
SubnetId=subnet.id, MapPublicIpOnLaunch={"Value": True}
)
route_table.associate_with_subnet(SubnetId=subnet.id)
self.sn_ids.append(subnet.id)
else:
logger.info("{} unavailable".format(zone['ZoneName']))
# Security groups
self.security_group(vpc)
self.vpc_id = vpc.id
return vpc
def security_group(self, vpc):
"""Create and configure a new security group.
Allows all ICMP in, all TCP and UDP in within VPC.
This security group is very open. It allows all incoming ping requests on all
ports. It also allows all outgoing traffic on all ports. This can be limited by
changing the allowed port ranges.
Parameters
----------
vpc : VPC instance
VPC in which to set up security group.
"""
sg = vpc.create_security_group(
GroupName="private-subnet", Description="security group for remote executors"
)
ip_ranges = [{'CidrIp': '10.0.0.0/16'}]
# Allows all ICMP in, all TCP and UDP in within VPC
in_permissions = [
{
'IpProtocol': 'TCP',
'FromPort': 0,
'ToPort': 65535,
'IpRanges': ip_ranges,
}, {
'IpProtocol': 'UDP',
'FromPort': 0,
'ToPort': 65535,
'IpRanges': ip_ranges,
}, {
'IpProtocol': 'ICMP',
'FromPort': -1,
'ToPort': -1,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}],
}, {
'IpProtocol': 'TCP',
'FromPort': 22,
'ToPort': 22,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}],
}
]
# Allows all TCP out, all TCP and UDP out within VPC
out_permissions = [
{
'IpProtocol': 'TCP',
'FromPort': 0,
'ToPort': 65535,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}],
},
{
'IpProtocol': 'TCP',
'FromPort': 0,
'ToPort': 65535,
'IpRanges': ip_ranges,
},
{
'IpProtocol': 'UDP',
'FromPort': 0,
'ToPort': 65535,
'IpRanges': ip_ranges,
},
]
sg.authorize_ingress(IpPermissions=in_permissions)
sg.authorize_egress(IpPermissions=out_permissions)
self.sg_id = sg.id
return sg
def config_route_table(self, vpc, internet_gateway):
"""Configure route table for Virtual Private Cloud (VPC).
Parameters
----------
vpc : dict
Representation of the VPC (created by create_vpc()).
internet_gateway : dict
Representation of the internet gateway (created by create_vpc()).
"""
route_table = vpc.create_route_table()
route_table.create_route(
DestinationCidrBlock='0.0.0.0/0', GatewayId=internet_gateway.internet_gateway_id
)
return route_table
def xstr(self, s):
return '' if s is None else s
def spin_up_instance(self, command, job_name):
"""Start an instance in the VPC in the first available subnet.
        Starting several instances per block (nodes_per_block > 1) is not
        supported; exactly one node is launched per block.
Parameters
----------
command : str
Command string to execute on the node.
job_name : str
Name associated with the instances.
"""
command = Template(template_string).substitute(jobname=job_name,
user_script=command,
linger=str(self.linger).lower(),
worker_init=self.worker_init)
instance_type = self.instance_type
subnet = self.sn_ids[0]
ami_id = self.image_id
total_instances = len(self.instances)
if float(self.spot_max_bid) > 0:
spot_options = {
'MarketType': 'spot',
'SpotOptions': {
'MaxPrice': str(self.spot_max_bid),
'SpotInstanceType': 'one-time',
'InstanceInterruptionBehavior': 'terminate'
}
}
else:
spot_options = {}
if total_instances > self.max_nodes:
logger.warning("Exceeded instance limit ({}). Cannot continue\n".format(self.max_nodes))
return [None]
try:
tag_spec = [{"ResourceType": "instance", "Tags": [{'Key': 'Name', 'Value': job_name}]}]
instance = self.ec2.create_instances(
MinCount=1,
MaxCount=1,
InstanceType=instance_type,
ImageId=ami_id,
KeyName=self.key_name,
SubnetId=subnet,
SecurityGroupIds=[self.sg_id],
TagSpecifications=tag_spec,
InstanceMarketOptions=spot_options,
InstanceInitiatedShutdownBehavior='terminate',
IamInstanceProfile={'Arn': self.iam_instance_profile_arn},
UserData=command
)
except ClientError as e:
print(e)
logger.error(e.response)
return [None]
except Exception as e:
logger.error("Request for EC2 resources failed : {0}".format(e))
return [None]
self.instances.append(instance[0].id)
logger.info(
"Started up 1 instance {}. Instance type: {}".format(instance[0].id, instance_type)
)
return instance
def shut_down_instance(self, instances=None):
"""Shut down a list of instances, if provided.
If no instance is provided, the last instance started up will be shut down.
"""
if instances and len(self.instances) > 0:
print(instances)
try:
print([i.id for i in instances])
except Exception as e:
print(e)
term = self.client.terminate_instances(InstanceIds=instances)
logger.info("Shut down {} instances (ids:{}".format(len(instances), str(instances)))
elif len(self.instances) > 0:
instance = self.instances.pop()
term = self.client.terminate_instances(InstanceIds=[instance])
logger.info("Shut down 1 instance (id:{})".format(instance))
else:
logger.warning("No Instances to shut down.\n")
return -1
self.get_instance_state()
return term
def get_instance_state(self, instances=None):
"""Get states of all instances on EC2 which were started by this file."""
if instances:
desc = self.client.describe_instances(InstanceIds=instances)
else:
desc = self.client.describe_instances(InstanceIds=self.instances)
# pprint.pprint(desc['Reservations'],indent=4)
for i in range(len(desc['Reservations'])):
instance = desc['Reservations'][i]['Instances'][0]
self.instance_states[instance['InstanceId']] = instance['State']['Name']
return self.instance_states
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
            The status codes of the requested jobs.
"""
all_states = []
status = self.client.describe_instances(InstanceIds=list(job_ids))
for r in status['Reservations']:
for i in r['Instances']:
instance_id = i['InstanceId']
instance_state = translate_table.get(i['State']['Name'], JobState.UNKNOWN)
instance_status = JobStatus(instance_state)
self.resources[instance_id]['status'] = instance_status
all_states.extend([instance_status])
return all_states
def submit(self, command='sleep 1', tasks_per_node=1, job_name="parsl.aws"):
"""Submit the command onto a freshly instantiated AWS EC2 instance.
Submit returns an ID that corresponds to the task that was just submitted.
Parameters
----------
command : str
Command to be invoked on the remote side.
tasks_per_node : int (default=1)
Number of command invocations to be launched per node
job_name : str
Prefix for the job name.
Returns
-------
None or str
If at capacity, None will be returned. Otherwise, the job identifier will be returned.
"""
job_name = "parsl.aws.{0}".format(time.time())
wrapped_cmd = self.launcher(command,
tasks_per_node,
self.nodes_per_block)
[instance, *rest] = self.spin_up_instance(command=wrapped_cmd, job_name=job_name)
if not instance:
logger.error("Failed to submit request to EC2")
return None
logger.debug("Started instance_id: {0}".format(instance.instance_id))
state = translate_table.get(instance.state['Name'], JobState.PENDING)
self.resources[instance.instance_id] = {
"job_id": instance.instance_id,
"instance": instance,
"status": JobStatus(state)
}
return instance.instance_id
def cancel(self, job_ids):
"""Cancel the jobs specified by a list of job ids.
Parameters
----------
job_ids : list of str
            List of job identifiers.
Returns
-------
list of bool
Each entry in the list will contain False if the operation fails. Otherwise, the entry will be True.
"""
if self.linger is True:
logger.debug("Ignoring cancel requests due to linger mode")
return [False for x in job_ids]
try:
self.client.terminate_instances(InstanceIds=list(job_ids))
except Exception as e:
logger.error("Caught error while attempting to remove instances: {0}".format(job_ids))
raise e
else:
logger.debug("Removed the instances: {0}".format(job_ids))
for job_id in job_ids:
self.resources[job_id]["status"] = JobStatus(JobState.COMPLETED)
for job_id in job_ids:
self.instances.remove(job_id)
return [True for x in job_ids]
def show_summary(self):
"""Print human readable summary of current AWS state to log and to console."""
self.get_instance_state()
status_string = "EC2 Summary:\n\tVPC IDs: {}\n\tSubnet IDs: \
{}\n\tSecurity Group ID: {}\n\tRunning Instance IDs: {}\n".format(
self.vpc_id, self.sn_ids, self.sg_id, self.instances
)
status_string += "\tInstance States:\n\t\t"
self.get_instance_state()
for state in self.instance_states.keys():
status_string += "Instance ID: {} State: {}\n\t\t".format(
state, self.instance_states[state]
)
status_string += "\n"
logger.info(status_string)
return status_string
def teardown(self):
"""Teardown the EC2 infastructure.
Terminate all EC2 instances, delete all subnets, delete security group, delete VPC,
and reset all instance variables.
"""
self.shut_down_instance(self.instances)
self.instances = []
try:
self.client.delete_internet_gateway(InternetGatewayId=self.internet_gateway)
self.internet_gateway = None
self.client.delete_route_table(RouteTableId=self.route_table)
self.route_table = None
for subnet in list(self.sn_ids):
# Cast to list ensures that this is a copy
# Which is important because it means that
# the length of the list won't change during iteration
self.client.delete_subnet(SubnetId=subnet)
self.sn_ids.remove(subnet)
self.client.delete_security_group(GroupId=self.sg_id)
self.sg_id = None
self.client.delete_vpc(VpcId=self.vpc_id)
self.vpc_id = None
except Exception as e:
logger.error("{}".format(e))
raise e
self.show_summary()
os.remove(self.config['state_file_path'])
@property
def label(self):
return self._label
@property
def current_capacity(self):
"""Returns the current blocksize."""
return len(self.instances)
def goodbye(self):
self.teardown()
@property
def status_polling_interval(self):
return 60
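# Hedged usage sketch (illustrative only, not part of the original module): the
# provider instance is normally created and configured by the executor, so the
# constructor arguments are not shown here; the methods above then drive a simple
# submit/status/cancel lifecycle.
#
#     job_id = provider.submit('sleep 60', tasks_per_node=1, job_name='parsl.aws')
#     provider.status([job_id])
#     provider.cancel([job_id])
#     provider.teardown()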
| 36.950355 | 117 | 0.587946 |
4a276e343345dc34e2d63fa1702c14a2a72af94e | 882 | py | Python | test/iqdb_test.py | Coder-Sakura/PicImageSearch | f947d0aa5648f39763f35295e7d67b9ec2eecca8 | [
"MIT"
] | null | null | null | test/iqdb_test.py | Coder-Sakura/PicImageSearch | f947d0aa5648f39763f35295e7d67b9ec2eecca8 | [
"MIT"
] | null | null | null | test/iqdb_test.py | Coder-Sakura/PicImageSearch | f947d0aa5648f39763f35295e7d67b9ec2eecca8 | [
"MIT"
] | null | null | null | from loguru import logger
from PicImageSearch import Iqdb
_REQUESTS_KWARGS = {
# 'proxies': {
# 'https': 'http://127.0.0.1:10809',
# }
    # enable the 'proxies' entry above if a proxy is needed
}
iqdb = Iqdb()
res = iqdb.search(r"https://pixiv.cat/77702503-1.jpg")
# logger.info(res.origin)
# logger.info(res.raw)
logger.info("说明: " + res.raw[0].content)
logger.info("来源地址: " + res.raw[0].url)
logger.info("缩略图: " + res.raw[0].thumbnail)
logger.info("相似度: " + res.raw[0].similarity)
logger.info("图片大小: " + res.raw[0].size)
logger.info("图片来源: " + res.raw[0].source)
logger.info("其他图片来源: " + str(res.raw[0].other_source))
logger.info("SauceNAO搜图链接: " + res.saucenao)
logger.info("Ascii2d搜图链接: " + res.ascii2d)
logger.info("TinEye搜图链接: " + res.tineye)
logger.info("Google搜图链接: " + res.google)
logger.info("相似度低的结果: " + str(res.more))
| 32.666667 | 59 | 0.599773 |
4a276ee4adc4fcd9270ec436ef97aa86b0ccbb73 | 1,610 | py | Python | src/server/oasisapi/files/viewsets.py | cdlhub/OasisPlatform | b9e23448f9ddb18ba589081ee9f702f5c50c2b78 | [
"BSD-3-Clause"
] | null | null | null | src/server/oasisapi/files/viewsets.py | cdlhub/OasisPlatform | b9e23448f9ddb18ba589081ee9f702f5c50c2b78 | [
"BSD-3-Clause"
] | null | null | null | src/server/oasisapi/files/viewsets.py | cdlhub/OasisPlatform | b9e23448f9ddb18ba589081ee9f702f5c50c2b78 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from django_filters import rest_framework as filters
from rest_framework import viewsets, mixins
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
from rest_framework.settings import api_settings
from ..filters import TimeStampedFilter
from .views import handle_related_file
from .serializers import RelatedFileSerializer
from .models import RelatedFile
class FilesFilter(TimeStampedFilter):
content_type = filters.CharFilter(
        help_text=_('Filter results by case insensitive `content_type` equal to the given string'),
lookup_expr='iexact',
field_name='content_type'
)
filename__contains = filters.CharFilter(
        help_text=_('Filter results by case insensitive `filename` containing the given string'),
lookup_expr='icontains',
field_name='filename'
)
user = filters.CharFilter(
        help_text=_('Filter results by case insensitive `creator_name` equal to the given string'),
lookup_expr='iexact',
field_name='creator_name'
)
class Meta:
model = RelatedFile
fields = [
'content_type',
'filename__contains',
'user',
]
class FilesViewSet(mixins.RetrieveModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
""" Add doc string here
"""
queryset = RelatedFile.objects.all()
serializer_class = RelatedFileSerializer
filter_class = FilesFilter
| 32.2 | 100 | 0.706832 |
4a276f0f99cc640529418675c4f3e4ca3341a700 | 1,707 | py | Python | src/backup.py | jpatnayk/i3wm-themer | 8e2a63c690b2b08b0ef47ca4ad9397d570c51fab | [
"MIT"
] | 1 | 2020-02-05T10:51:55.000Z | 2020-02-05T10:51:55.000Z | src/backup.py | jpatnayk/i3wm-themer | 8e2a63c690b2b08b0ef47ca4ad9397d570c51fab | [
"MIT"
] | null | null | null | src/backup.py | jpatnayk/i3wm-themer | 8e2a63c690b2b08b0ef47ca4ad9397d570c51fab | [
"MIT"
] | null | null | null | import os.path
from shutil import copyfile
import fileutils as fileu
import msgfunc as prnt
def backup_file( config, back_file, destination):
if(fileu.locate_file(config[back_file])):
prnt.prnt( '-s', 'Located your '+config[back_file]+' file!')
try:
copyfile( config[back_file], destination)
prnt.prnt( '-s', 'Backed it up successfully!')
return True
except:
prnt.prnt( '-f', 'Failed to back it up!')
return False
else:
prnt.prnt( '-f', 'Could not locate '+config[back_file]+' file!')
return False
def backup_config( backup_folder, configuration):
prnt.prnt( '-n', 'Backing up your files.')
if( fileu.locate_folder(backup_folder) ):
prnt.prnt( '-s', 'Located the backup folder.')
# Backup i3 file
if 'i3-config' in configuration:
if( backup_file( configuration, 'i3-config', backup_folder+'/i3.config')):
prnt.prnt( '-s', 'Success!')
else:
prnt.prnt( '-f', 'Failed!')
        # Backup Polybar config
if 'polybar-config' in configuration:
if( backup_file( configuration, 'polybar-config', backup_folder+'/polybar.config')):
prnt.prnt( '-s', 'Success!')
else:
prnt.prnt( '-f', 'Failed!')
# Backup xresources
if 'xresources' in configuration:
if( backup_file( configuration, 'xresources', backup_folder+'/xresources')):
prnt.prnt( '-s', 'Success!')
else:
prnt.prnt( '-f', 'Failed!')
else:
prnt.prnt( '-f', 'Failed to locate the backup folder.')
exit(9)
| 33.470588 | 96 | 0.561219 |
4a276f2b334e648d3bb1b0a7886ecfed56b79597 | 882 | py | Python | darts/__init__.py | amadejkocbek/darts | 074be2a76eee11258da066878c564badf40834e9 | [
"Apache-2.0"
] | null | null | null | darts/__init__.py | amadejkocbek/darts | 074be2a76eee11258da066878c564badf40834e9 | [
"Apache-2.0"
] | 42 | 2021-10-04T17:11:50.000Z | 2021-12-24T15:37:41.000Z | darts/__init__.py | amadejkocbek/darts | 074be2a76eee11258da066878c564badf40834e9 | [
"Apache-2.0"
] | null | null | null | """
darts
-----
"""
from .timeseries import TimeSeries, concatenate
import matplotlib as mpl
from matplotlib import cycler
__version__ = "0.16.0"
colors = cycler(
color=["black", "003DFD", "b512b8", "11a9ba", "0d780f", "f77f07", "ba0f0f"]
)
u8plots_mplstyle = {
"font.family": "sans serif",
"axes.edgecolor": "black",
"axes.grid": True,
"axes.labelcolor": "#333333",
"axes.labelweight": 600,
"axes.linewidth": 1,
"axes.prop_cycle": colors,
"axes.spines.top": False,
"axes.spines.right": False,
"axes.spines.bottom": False,
"axes.spines.left": False,
"grid.color": "#dedede",
"legend.frameon": False,
"lines.linewidth": 1.3,
"xtick.color": "#333333",
"xtick.labelsize": "small",
"ytick.color": "#333333",
"ytick.labelsize": "small",
"xtick.bottom": False,
}
mpl.rcParams.update(u8plots_mplstyle)
| 21.512195 | 79 | 0.622449 |
4a276fae313692800c347880b9d90906557fa7a6 | 27,434 | py | Python | src/graphepp/graphepp.py | jwallnoefer/graphepp | 684b683d84ce8ccfb5c809e5aa3f4f1acb0f7048 | [
"MIT"
] | null | null | null | src/graphepp/graphepp.py | jwallnoefer/graphepp | 684b683d84ce8ccfb5c809e5aa3f4f1acb0f7048 | [
"MIT"
] | null | null | null | src/graphepp/graphepp.py | jwallnoefer/graphepp | 684b683d84ce8ccfb5c809e5aa3f4f1acb0f7048 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Functions for multiparite entanglement purification on graph states.
See README.md for an overview of the functionality.
"""
# All states are assumed to be diagonal in the graph state basis corresponding to
# the graph state considered.
# The noise should work for arbitrary graph states, while the purification
# protocol only makes sense for two-colorable graph states.
#
# Make sure the variables input follow the conventions given in the docstrings,
# since not many sanity checks are included.
#
# This should run reasonably well even for bigger states (ca. 10 qubits) if
# cythonized.
import numpy as np
from itertools import product
from functools import lru_cache
# ====Graph definitions==== #
def adj_matrix(N, E):
"""Calculates the adjacency matrix from vetices and edges.
The graph has `N` vertices (labeled 0 to N-1) and edges `E`.
Parameters
----------
N : int
Number of vertices.
E : list (or tuple) of tuples
Should contain 2-tuples with the edges of the graph. Each pair
(i,j) indicates a connection between vertices i and j. Only
simple, unweighted, undirected graphs are supported. Note that
the `N` vertices are labeled 0...N-1
Returns
-------
adj : np.ndarray
Adjacency matrix of the graph specified. Is a symmetric `N` x `N` matrix
with N_{ij}=1 if (i,j) is in `E` and 0 otherwise.
"""
adj = np.zeros((N, N), dtype=int)
for i, n in product(range(N), repeat=2):
if (i, n) in E:
adj[i, n] = 1
adj[n, i] = 1
return adj
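def _example_adj_matrix():
    """Hedged illustration (not part of the original module).

    The adjacency matrix of the 3-vertex path graph 0-1-2 is
    [[0, 1, 0], [1, 0, 1], [0, 1, 0]].
    """
    return adj_matrix(3, [(0, 1), (1, 2)])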
class Graph(object):
"""A graph object consisting of vertices and edges.
Other functions that need to know in which graph state basis a state is
given expect a `Graph` object to specify the associated graph.
The properties are read-only on purpose to make this hashable. This is
desirable because some functions in this module profit heavily from caching.
Parameters
----------
N : int
Number of vertices.
E : list of tuples of ints
Should contain 2-tuples with the edges of the graph. Each pair
(i,j) indicates a connection between vertices i and j. Only
simple, unweighted, undirected graphs are supported. Note that
the `N` vertices are labeled 0...N-1
sets : list of list of ints, optional
Optionally define subsets of vertices, e.g. coloring of the graph
as expected for the entanglement purification protocols. Default: []
Attributes
----------
adj : np.ndarray
Adjacency matrix of the graph.
N
E
sets
a : tuple of ints
the first subset of vertices (e.g. one color)
b : tuple of ints
the second subset of vertices (e.g. one color)
"""
def __init__(self, N, E, sets=[]):
self._N = N
self._E = tuple(sorted(tuple(sorted(edge)) for edge in E))
self._sets = tuple(tuple(sorted(set)) for set in sets)
self._adj = tuple(tuple(row) for row in adj_matrix(N, E))
def __repr__(self):
return f"gg.Graph(N={self.N}, E={self.E}, sets={self.sets})"
def __hash__(self):
return hash((self._N, self._sets, self._adj))
def __eq__(self, other):
if isinstance(other, Graph):
return (
self.N == other.N
and self.sets == other.sets
and np.all(self.adj == other.adj)
)
else:
return NotImplemented
@property
def N(self):
return self._N
@property
def E(self):
return self._E
@property
def sets(self):
return self._sets
@property
def adj(self):
return np.array(self._adj, dtype=int)
@property
def a(self):
# subset a is the first color
try:
return self.sets[0]
except IndexError:
return None
@property
def b(self):
        # subset b is the second color
try:
return self.sets[1]
except IndexError:
return None
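def _example_graph_construction():
    """Hedged illustration (not part of the original module).

    A 3-qubit star (GHZ-type) graph with the two-coloring expected by the
    purification protocols: vertex 0 forms set a, vertices 1 and 2 form set b.
    """
    ghz3 = Graph(N=3, E=[(0, 1), (0, 2)], sets=[[0], [1, 2]])
    return ghz3.a, ghz3.b  # ((0,), (1, 2))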
# ====Noise functions==== #
def noisy(rho, subset, graph=None):
"""Template to generate noise patterns.
In physical terms this is correlated sigma_z noise on all particles in `subset`.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
subset : list of int
The list of which qubits are affected, counting starts at 0.
Indices are expected to be in order
012...(N-1) regardless of coloring of the vertices.
graph : Graph, optional
Specifies in which graph state basis rho is given.
This function does not use it - only included for consistency. Default: None
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
N = int(np.log2(len(rho)))
rho = rho.reshape((2,) * N)
rho = np.flip(rho, axis=subset)
rho = rho.reshape((2 ** N,))
return rho
# # if there is only one int in subset, the following is actually faster
# # than np.flip for some reason
# for n in subset:
# rho = np.swapaxes(np.swapaxes(rho, 0, n)[::-1], 0, n)
# #old, slow implementation
# j=0
# for n in nn:
# k=int(np.log2(len(rho)))-1-n
# j=j^(1<<k) # + would be slightly faster than ^ but can lead to weird behaviour
# mu=np.zeros(len(rho))
# for i in range(len(mu)):
# mu[i]=rho[i^j]
# return mu
def znoisy(rho, qubit_index, graph=None):
"""Applies sigma_z noise on the specified qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
graph : Graph, optional
Specifies in which graph state basis rho is given.
This function does not use it. Included only so znoisy can be called
the same way as xnoisy and ynoisy. Default: None
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return noisy(rho, [qubit_index])
def xnoisy(rho, qubit_index, graph):
"""Applies sigma_x noise on the specified qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
nn = []
for i in range(graph.N):
if graph.adj[qubit_index, i]:
nn += [i]
return noisy(rho, nn)
def ynoisy(rho, qubit_index, graph):
"""Applies sigma_y noise on the specified qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
nn = [qubit_index]
for i in range(graph.N):
if graph.adj[qubit_index, i]:
nn += [i]
return noisy(rho, nn)
def znoise(rho, qubit_index, p, graph=None):
"""Apply Pauli-Z noise channel with error parameter `p` on a qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph, optional
Specifies in which graph state basis rho is given. Default: None
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return p * rho + (1 - p) * znoisy(rho, qubit_index, graph)
def xnoise(rho, qubit_index, p, graph):
"""Apply Pauli-X noise channel with error parameter `p` on a qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return p * rho + (1 - p) * xnoisy(rho, qubit_index, graph)
def ynoise(rho, qubit_index, p, graph):
"""Apply Pauli-Y noise channel with error parameter `p` on a qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return p * rho + (1 - p) * ynoisy(rho, qubit_index, graph)
def wnoise(rho, qubit_index, p, graph):
"""Apply local white noise channel with error parameter `p` on a qubit.
Note: local white noise is often also called local depolarizing noise
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
The `qubit_index`-th qubit is affected, counting starts at 0. Indices
are expected to be in order 012...(N-1) regardless of coloring of
the vertices.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return p * rho + (1 - p) / 4 * (
rho
+ xnoisy(rho, qubit_index, graph)
+ ynoisy(rho, qubit_index, graph)
+ znoisy(rho, qubit_index, graph)
)
def noise_pattern(rho, qubit_index, ps, graph):
"""Applies a local pauli-diagonal noise channel on the specified qubit.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
qubit_index : int
        The `qubit_index`-th qubit is affected, counting starts at 0. Indices are
expected to be in order
012...(N-1) regardless of coloring of the vertices.
ps : list of scalars
The coefficients of the noise channel.
Should have 4 entries p_0 p_x p_y p_z that sum to 1.
graph : Graph
Specifies in which graph state basis rho is given.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
return (
ps[0] * rho
+ ps[1] * xnoisy(rho, qubit_index, graph)
+ ps[2] * ynoisy(rho, qubit_index, graph)
+ ps[3] * znoisy(rho, qubit_index, graph)
)
def wnoise_all(rho, p, graph):
"""Apply local white noise with the same error parameter to all qubits.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph
Specifies the graphstate considered.
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
for i in range(int(np.log2(len(rho)))):
rho = wnoise(rho, i, p, graph)
return rho
def noise_global(rho, p, graph=None):
"""Apply a global white noise channel to the state.
Parameters
----------
rho : np.ndarray
Is the state acted on. Should be a 2**N-dimensional vector
with the diagonal entries of the density matrix in the graph
state basis.
p : scalar
Error parameter of the channel should be in interval [0, 1].
graph : Graph, optional
Specifies in which graph state basis rho is given.
This function does not use it - only included for consistency. Default: None
Returns
-------
np.ndarray
The state after the action. Same shape as `rho`.
"""
k = len(rho)
return p * rho + (1 - p) * np.ones(k) / k
# ====Functions related to distance measures==== #
def fidelity(rho, mu):
"""Calculate fidelity of two states given in the same graph state basis.
This is a special case of the general definition of the fidelity:
F(rho, mu) = (tr(sqrt(sqrt(rho), mu, sqrt(rho))))**2
Note that the term "fidelity" has been used ambiguously in quantum
information theory, either referring to F or sqrt(F). F as defined here is
the square of the fidelity as defined in Nielsen and Chuang.
sqrt(1 - F(rho, mu)) is a distance measure.
(1 - sqrt(F(rho, mu))) is a distance measure.
Parameters
----------
rho, mu : np.ndarray
Diagonal entries of quantum states given in the same graph state basis.
Returns
-------
scalar
The fidelity F.
"""
a = np.sqrt(rho)
b = np.sqrt(mu)
return np.dot(a, b) ** 2
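def _example_fidelity():
    """Hedged illustration (not part of the original module).

    The fidelity between a pure graph-diagonal state and the maximally mixed
    state on 2 qubits is 1/4.
    """
    pure = np.array([1.0, 0.0, 0.0, 0.0])
    mixed = np.ones(4) / 4
    return fidelity(pure, mixed)  # 0.25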
def fid_alternative(rho, mu):
"""Alternative fidelity function.
    Calculates sqrt(F) instead of F (as defined in the `fidelity` function).
Parameters
----------
rho, mu : np.ndarray
Diagonal entries of quantum states given in the same graph state basis.
Returns
-------
scalar
sqrt(F)
"""
a = np.sqrt(rho)
b = np.sqrt(mu)
return np.dot(a, b)
def trace_distance(rho, mu):
"""Calculate the trace distance between to states in the same graph state basis.
Parameters
----------
rho, mu : np.ndarray
Diagonal entries of quantum states given in the same graph state basis.
Returns
-------
scalar
The trace distance.
"""
sigma = np.abs(rho - mu)
return 1 / 2 * np.sum(sigma)
# ====Auxiliary Functions==== #
def normalize(rho):
"""Normalize the state to trace = 1.
Also catches numerical phenomena with entries < 0.
Parameters
----------
rho : np.ndarray
The state to be normalized.
Returns
-------
np.ndarray
The normalized state. Same shape as `rho`.
"""
if np.any(rho < 0):
rho = np.copy(rho)
rho[rho < 0] = 0
return rho / np.sum(rho)
def local_complementation(n, graph):
"""Return the new graph after local complementation.
Careful: Subsets are just copied so the coloring is not updated!
Parameters
----------
n : int
Local complementation around the `n`-th vertex.
graph : Graph
The original graph.
Returns
-------
Graph
The graph after local complementation.
"""
# note that Graph is only a simple graph!
# crude implementation - surely this can be done better.
# get neighbourhood of n
Nn = []
for i in range(graph.N):
if graph.adj[i, n]:
Nn += [i]
new_adjmatrix = np.copy(graph.adj)
for i in Nn:
for k in Nn:
if k == i:
continue
new_adjmatrix[i, k] = (graph.adj[i, k] + 1) % 2
# get new edges from adjmatrix
new_edges = []
for i in range(graph.N):
for k in range(i + 1, graph.N):
if new_adjmatrix[i, k]:
new_edges += [(i, k)]
return Graph(
N=graph.N, E=new_edges, sets=graph.sets
) # just copies sets without thinking
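def _example_local_complementation():
    """Hedged illustration (not part of the original module).

    Local complementation around vertex 0 of the path graph 1-0-2 adds the
    edge (1, 2), turning it into the triangle graph.
    """
    path = Graph(N=3, E=[(0, 1), (0, 2)])
    return local_complementation(0, path).E  # ((0, 1), (0, 2), (1, 2))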
# ====EPP functions for two-colorable states==== #
@lru_cache(maxsize=None) # this will be getting called a lot with the same input
def _mask_a(j, graph):
"""Spread a bit string on set a to the whole bitstring.
Takes an int representing a bit string of length size of set a
and spreads it to a length graph.N bit string with the bits set at the
correct places.
Example: graph.N = 4, graph.a = [0, 2], j=3 (bitstring "11")
will return 10 (bitstring "1010")
Example: graph.N = 4, graph.a = [0, 2], j=1 (bitstring "01")
will return 2 (bitstring "0010")
Parameters
----------
j : int
Representing a bit string of length len(graph.a).
graph : Graph
The graph containing information about the set.
Returns
-------
int
Representing a bit string of length graph.N, i.e. `j` spread out over
the appropriate positions in the bit string.
"""
m = ["0"] * graph.N
short_string = format(j, "0" + str(len(graph.a)) + "b")
for bit, idx in zip(short_string, graph.a):
m[idx] = bit
long_string = "".join(m)
return int(long_string, base=2)
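def _example_mask_a():
    """Hedged illustration (not part of the original module).

    Spreading the bit string "11" (j=3) over set a = (0, 2) of a 4-vertex graph
    yields the bit string "1010", i.e. the integer 10.
    """
    graph = Graph(N=4, E=[(0, 1), (1, 2), (2, 3)], sets=[[0, 2], [1, 3]])
    return _mask_a(3, graph)  # 10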
@lru_cache(maxsize=None) # this will be getting called a lot with the same input
def _mask_b(j, graph):
"""Spread a bit string on set b to the whole bitstring.
Takes an int representing a bit string of length size of set b
and spreads it to a length graph.N bit string with the bits set at the
correct places.
Example: graph.N = 4, graph.b = [1, 3], j=3 (bitstring "11")
    will return 5 (bitstring "0101")
Example: graph.N = 4, graph.b = [1, 3], j=1 (bitstring "01")
    will return 1 (bitstring "0001")
Parameters
----------
j : int
Representing a bit string of length len(graph.b).
graph : Graph
The graph containing information about the set.
Returns
-------
int
Representing a bit string of length graph.N, i.e. `j` spread out over
the appropriate positions in the bit string.
"""
m = ["0"] * graph.N
short_string = format(j, "0" + str(len(graph.b)) + "b")
for bit, idx in zip(short_string, graph.b):
m[idx] = bit
long_string = "".join(m)
return int(long_string, base=2)
# Note: np.fromfunction does not help with speeding up p1p2, but cythonizing does!
def p1(rho, graph):
"""Perform protocol P1 of the ADB protocol for two-colorable graph states.
Implements equation (17) of Phys. Rev. A 71, 012319 (2005)
Preprint: https://arxiv.org/abs/quant-ph/0405045
Comment on the implementation:
The integers used here correspond to the bit strings in the publication
as follows:
i ~ γ_A,γ_B
j ~ 0,γ_B
k ~ ν_B
m ~ 0,ν_B
and therefore:
i^j ~ γ_A,0
(i^j)^m ~ γ_A,ν_B
i^m ~ γ_A,(γ_B ⊕ ν_B)
So the loop over k iterates over all possible ν_B. While equation (17)
suggests another loop over μ_B, there is only one μ_B = (γ_B ⊕ ν_B) that
fulfils the specified condition ν_B ⊕ μ_B = γ_B so another nested loop is
not necessary.
Parameters
----------
rho : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
Two copies of this state will be used to perform the protocol.
graph : Graph
Graph of the target graph state to be purified. rho is given in this
graph state basis. Must contain coloring information of the
two-colorable graph state.
Returns
-------
np.ndarray
The output state of the protocol, assuming the purification step was
successful.
"""
mu = np.zeros(len(rho))
for i in range(2 ** graph.N):
j = i & (_mask_b((1 << len(graph.b)) - 1, graph))
for k in range(2 ** len(graph.b)):
m = _mask_b(k, graph)
mu[i] += rho[(i ^ j) ^ m] * rho[i ^ m]
mu = normalize(mu)
return mu
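def _example_p1():
    """Hedged illustration (not part of the original module).

    One P1 step on two copies of a globally depolarized 3-qubit graph state
    (star graph with coloring {0} / {1, 2}) increases the fidelity with the
    target state slightly, here from 0.825 to roughly 0.83.
    """
    ghz3 = Graph(N=3, E=[(0, 1), (0, 2)], sets=[[0], [1, 2]])
    target = np.zeros(2 ** 3)
    target[0] = 1.0  # the ideal graph state in its own graph state basis
    noisy_state = noise_global(target, 0.8)
    purified = p1(noisy_state, ghz3)
    return fidelity(noisy_state, target), fidelity(purified, target)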
def p2(rho, graph):
"""Perform protocol P2 of the ADB protocol for two-colorable graph states.
Implements equation (19) of Phys. Rev. A 71, 012319 (2005)
Preprint: https://arxiv.org/abs/quant-ph/0405045
See docstring of p1 for comment on the iterations and bit strings.
Parameters
----------
rho : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
Two copies of this state will be used to perform the protocol.
graph : Graph
Graph of the target graph state to be purified. rho is given in this
graph state basis. Must contain coloring information of the
two-colorable graph state.
Returns
-------
np.ndarray
The output state of the protocol, assuming the purification step was
successful.
"""
mu = np.zeros(len(rho))
for i in range(2 ** graph.N):
j = i & (_mask_a((1 << len(graph.a)) - 1, graph))
for k in range(2 ** len(graph.a)):
m = _mask_a(k, graph)
mu[i] += rho[(i ^ j) ^ m] * rho[i ^ m]
mu = normalize(mu)
return mu
def p1_var(rho, sigma, graph):
"""P1 but with two different input states instead of two copies of the same.
Parameters
----------
rho : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
First input state.
sigma : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
Second input state.
graph : Graph
Graph of the target graph state to be purified. rho and mu are
given in this graph state basis. Must contain coloring information
of the two-colorable graph state.
Returns
-------
np.ndarray
The output state of the protocol, assuming the purification step was
successful.
"""
mu = np.zeros(len(rho))
for i in range(2 ** graph.N):
j = i & (_mask_b((1 << len(graph.b)) - 1, graph))
for k in range(2 ** len(graph.b)):
m = _mask_b(k, graph)
mu[i] += rho[(i ^ j) ^ m] * sigma[i ^ m]
mu = normalize(mu)
return mu
def p2_var(rho, sigma, graph):
"""P2 but with two different input states instead of two copies of the same.
Parameters
----------
rho : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
First input state.
sigma : np.ndarray
Diagonal entries of a density matrix in the graph state basis.
Second input state.
graph : Graph
Graph of the target graph state to be purified. rho and mu are
given in this graph state basis. Must contain coloring information
of the two-colorable graph state.
Returns
-------
np.ndarray
The output state of the protocol, assuming the purification step was
successful.
"""
mu = np.zeros(len(rho))
for i in range(2 ** graph.N):
j = i & (_mask_a((1 << len(graph.a)) - 1, graph))
for k in range(2 ** len(graph.a)):
m = _mask_a(k, graph)
mu[i] += rho[(i ^ j) ^ m] * sigma[i ^ m]
mu = normalize(mu)
return mu
# ====EPP functions for arbitrary graph states==== #
@lru_cache(maxsize=None)
def _mask_k(j, graph, subset):
"""Spread a bit string on a subset to the whole bitstring.
    Takes an int representing a bit string of length len(subset)
    and spreads it to a length graph.N bit string with the bits set at the
    correct places.
    Example: graph.N = 4, subset = (0, 2), j=3 (bitstring "11")
    will return 10 (bitstring "1010")
    Example: graph.N = 4, subset = (0, 2), j=1 (bitstring "01")
will return 2 (bitstring "0010")
Parameters
----------
j : int
Representing a bit string of length len(subset).
graph : Graph
The graph, basically just here for graph.N
subset : tuple of ints
A subset of vertices of the graph. Ideally use a tuple not a list to
allow caching to work.
Returns
-------
int
Representing a bit string of length graph.N, i.e. `j` spread out over
the appropriate positions in the bit string.
"""
m = ["0"] * graph.N
short_string = format(j, "0" + str(len(subset)) + "b")
for bit, idx in zip(short_string, subset):
m[idx] = bit
long_string = "".join(m)
return int(long_string, base=2)
def pk(rho, sigma, graph1, graph2, subset):
"""Perform sub-protocol P_k.
A sub-protocol of the entanglement purification protocol for all graph
states. Implements equation (8) of Phys. Rev. A 74, 052316 (2006)
Preprint: https://arxiv.org/abs/quant-ph/0606090
See docstring of p1 for comment on the iterations and bit strings.
Parameters
----------
rho : np.ndarray
Diagonal entries of a density matrix in the graph state basis
corresponding to `graph1`. Main input state.
sigma : np.ndarray
Diagonal entries of a density matrix in the graph state basis
corresponding to `graph2`. Auxiliary input state. Make sure it
has the same number of qubits as `rho` (expand with unconnected
vertices if needed).
graph1 : Graph
The main graph of the protocol.
graph2 : Graph
The auxiliary graph for the k-th subset for the k-th sub-protocol P_k.
Make sure it has the same number of vertices as `graph1`
(expand with unconnected vertices if needed).
subset : tuple of ints
A subset of vertices, corresponding to the k-th subset.
Returns
-------
np.ndarray
The output state of the protocol, assuming the purification step was
successful.
"""
mu = np.zeros(len(rho))
other_set = tuple(i for i in range(graph1.N) if i not in subset)
for i in range(2 ** graph1.N):
j = i & (_mask_k((1 << len(other_set)) - 1, graph1, other_set))
for k in range(2 ** len(other_set)):
m = _mask_k(k, graph1, other_set)
mu[i] += rho[(i ^ j) ^ m] * sigma[i ^ m]
mu = normalize(mu)
return mu
| 29.690476 | 87 | 0.609645 |
4a27702a5d1fe3100025cc44bc4dcd2e38f79cc4 | 606 | py | Python | src/statistics/dataset/utils.py | VincentXWD/CopyGirlsMoe | 779838ad89d7def99ec74985919bcb0feaa6736c | [
"MIT"
] | 68 | 2018-06-21T08:33:14.000Z | 2019-12-05T13:31:13.000Z | src/statistics/dataset/utils.py | VincentXWD/CopyGirlsMoe | 779838ad89d7def99ec74985919bcb0feaa6736c | [
"MIT"
] | 5 | 2018-06-25T13:34:13.000Z | 2019-09-19T11:42:33.000Z | src/statistics/dataset/utils.py | VincentXWD/CopyGirlsMoe | 779838ad89d7def99ec74985919bcb0feaa6736c | [
"MIT"
] | 11 | 2018-06-25T10:44:20.000Z | 2019-10-08T12:14:55.000Z | __author__ = 'Wendong Xu'
import os
import re
def get_image_path(raw_input_dir: str) -> list:
"""
get image path and id from root resource path.
:return: a list contains all images' path.
"""
result = []
for root, dirs, files in os.walk(raw_input_dir):
for file in files:
result.append(os.path.join(root, file))
return result
def read_list(list_path: str) -> list:
"""
:param list_path:
:return:
"""
avatar_list = []
with open(list_path) as fin:
avatar_list = fin.readlines()
avatar_list = list(map(lambda x: x.split(' '), avatar_list))
return avatar_list
| 20.2 | 62 | 0.665017 |
4a277032db9f6effd081629317aa01dd2b7dbc91 | 137,925 | py | Python | datasets/multi_eurlex/multi_eurlex.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 10,608 | 2020-09-10T15:47:50.000Z | 2022-03-31T22:51:47.000Z | datasets/multi_eurlex/multi_eurlex.py | realChainLife/datasets | 98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd | [
"Apache-2.0"
] | 2,396 | 2020-09-10T14:55:31.000Z | 2022-03-31T19:41:04.000Z | datasets/multi_eurlex/multi_eurlex.py | realChainLife/datasets | 98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd | [
"Apache-2.0"
] | 1,530 | 2020-09-10T21:43:10.000Z | 2022-03-31T01:59:12.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiEURLEX - A multi-lingual and multi-label legal document classification dataset."""
import json
import os
import datasets
_CITATION = """\
@InProceedings{chalkidis-etal-2021-multieurlex,
author = {Chalkidis, Ilias
and Fergadiotis, Manos
and Androutsopoulos, Ion},
title = {MultiEURLEX -- A multi-lingual and multi-label legal document
classification dataset for zero-shot cross-lingual transfer},
booktitle = {Proceedings of the 2021 Conference on Empirical Methods
in Natural Language Processing},
year = {2021},
publisher = {Association for Computational Linguistics},
location = {Punta Cana, Dominican Republic},
}"""
_DESCRIPTION = """\
MultiEURLEX comprises 65k EU laws in 23 official EU languages (some low-ish resource).
Each EU law has been annotated with EUROVOC concepts (labels) by the Publication Office of EU.
As with the English EURLEX, the goal is to predict the relevant EUROVOC concepts (labels);
this is a multi-label classification task (given the text, predict multiple labels).
"""
DATA_URL = "https://zenodo.org/record/5363165/files/multi_eurlex.tar.gz"
_LANGUAGES = [
"en",
"da",
"de",
"nl",
"sv",
"bg",
"cs",
"hr",
"pl",
"sk",
"sl",
"es",
"fr",
"it",
"pt",
"ro",
"et",
"fi",
"hu",
"lt",
"lv",
"el",
"mt",
]
_CONCEPTS = {
"level_1": [
"100149",
"100160",
"100148",
"100147",
"100152",
"100143",
"100156",
"100158",
"100154",
"100153",
"100142",
"100145",
"100150",
"100162",
"100159",
"100144",
"100151",
"100157",
"100161",
"100146",
"100155",
],
"level_2": [
"100215",
"100211",
"100213",
"100214",
"100209",
"100208",
"100216",
"100210",
"100212",
"100270",
"100273",
"100269",
"100272",
"100276",
"100274",
"100271",
"100268",
"100275",
"100203",
"100201",
"100202",
"100207",
"100205",
"100204",
"100199",
"100206",
"100200",
"100198",
"100192",
"100193",
"100191",
"100196",
"100197",
"100195",
"100194",
"100230",
"100231",
"100229",
"100227",
"100228",
"100226",
"100173",
"100170",
"100172",
"100171",
"100250",
"100245",
"100251",
"100246",
"100248",
"100249",
"100252",
"100247",
"100260",
"100262",
"100261",
"100237",
"100241",
"100238",
"100239",
"100240",
"100234",
"100236",
"100232",
"100235",
"100233",
"100164",
"100166",
"100169",
"100167",
"100165",
"100163",
"100168",
"100178",
"100181",
"100179",
"100184",
"100183",
"100180",
"100182",
"100222",
"100217",
"100219",
"100223",
"100218",
"100220",
"100221",
"100288",
"100285",
"100287",
"100289",
"100286",
"100264",
"100263",
"100265",
"100267",
"100266",
"100177",
"100174",
"100176",
"100175",
"100225",
"100224",
"100256",
"100255",
"100258",
"100253",
"100254",
"100257",
"100259",
"100278",
"100280",
"100284",
"100277",
"100279",
"100282",
"100281",
"100283",
"100190",
"100186",
"100185",
"100188",
"100189",
"100187",
"100244",
"100243",
"100242",
],
"level_3": [
"1754",
"5881",
"c_4ee83dea",
"2479",
"1268",
"181",
"1415",
"2789",
"85",
"3487",
"1820",
"4305",
"2688",
"317",
"3257",
"2459",
"108",
"1004",
"4050",
"1909",
"1916",
"4184",
"3307",
"540",
"965",
"2398",
"4619",
"2496",
"490",
"1690",
"2475",
"1687",
"1221",
"385",
"3300",
"52",
"3318",
"2690",
"4706",
"1700",
"2517",
"1882",
"1417",
"3820",
"4414",
"3879",
"4565",
"3798",
"3809",
"3810",
"1362",
"1401",
"1400",
"3895",
"1398",
"5359",
"1390",
"1386",
"1845",
"1410",
"1834",
"5615",
"1718",
"5614",
"2720",
"2507",
"3587",
"4303",
"1418",
"1388",
"3151",
"1804",
"1630",
"1000",
"1488",
"2489",
"2639",
"1039",
"2656",
"2632",
"929",
"394",
"2698",
"1012",
"2448",
"1018",
"424",
"2510",
"4602",
"2486",
"1809",
"1325",
"1326",
"1321",
"2504",
"1021",
"1310",
"8469",
"285",
"1452",
"2495",
"2149",
"4370",
"1016",
"6332",
"4671",
"3239",
"1850",
"4380",
"3156",
"2520",
"5842",
"2519",
"2292",
"2128",
"614",
"c_4c7717f3",
"2487",
"2449",
"2450",
"1786",
"1810",
"524",
"2488",
"87",
"5268",
"13",
"474",
"4291",
"138",
"3193",
"139",
"3584",
"11",
"3185",
"1155",
"54",
"2474",
"802",
"539",
"3581",
"4413",
"1164",
"1154",
"813",
"5985",
"445",
"4189",
"4703",
"4300",
"69",
"2469",
"3582",
"554",
"3451",
"2628",
"2647",
"3423",
"2464",
"1346",
"3461",
"3474",
"3467",
"2189",
"3476",
"2500",
"4697",
"6722",
"3450",
"99",
"3074",
"2460",
"3489",
"2734",
"2723",
"5877",
"711",
"1277",
"2711",
"2443",
"2442",
"1063",
"651",
"2505",
"2972",
"2477",
"2493",
"4363",
"4412",
"4630",
"2014",
"5962",
"1372",
"2320",
"3544",
"1652",
"5913",
"2476",
"3605",
"2551",
"4358",
"2814",
"937",
"962",
"2707",
"2481",
"2817",
"4185",
"2914",
"2478",
"3797",
"4418",
"3641",
"3632",
"3689",
"4415",
"3160",
"2494",
"3101",
"3131",
"4505",
"3092",
"1954",
"2181",
"2015",
"3098",
"4539",
"4522",
"2512",
"4515",
"2184",
"82",
"4543",
"2178",
"557",
"3209",
"5974",
"1627",
"1074",
"4299",
"2468",
"3522",
"1163",
"2491",
"3273",
"5275",
"1737",
"1802",
"4276",
"2180",
"2258",
"4436",
"41",
"3232",
"2242",
"53",
"2246",
"2166",
"77",
"6034",
"3062",
"517",
"2702",
"2703",
"4157",
"1748",
"695",
"4750",
"558",
"2186",
"4367",
"1953",
"883",
"2370",
"1282",
"2573",
"3025",
"1453",
"2152",
"2001",
"4045",
"4179",
"4704",
"5789",
"3935",
"4222",
"5371",
"2701",
"1538",
"5400",
"3929",
"523",
"584",
"7347",
"3901",
"538",
"3900",
"3908",
"3899",
"6922",
"565",
"567",
"4010",
"566",
"1432",
"3513",
"3878",
"3954",
"573",
"2351",
"5923",
"2472",
"1422",
"4486",
"2467",
"668",
"3683",
"3278",
"4705",
"2175",
"4554",
"1426",
"4488",
"1405",
"3028",
"5922",
"873",
"778",
"2059",
"795",
"784",
"4150",
"494",
"486",
"4366",
"1370",
"4361",
"2473",
"2012",
"3855",
"c_415bab24",
"119",
"2188",
"3716",
"4365",
"4826",
"2179",
"6015",
"1455",
"8380",
"2170",
"5286",
"2190",
"2172",
"2167",
"5285",
"2171",
"6157",
"2191",
"90",
"2187",
"5366",
"2767",
"3743",
"1411",
"2498",
"3737",
"2364",
"1274",
"1414",
"744",
"1412",
"5350",
"3783",
"753",
"5576",
"5051",
"5620",
"438855",
"1005",
"3258",
"4067",
"5640",
"2163",
"5130",
"5767",
"5769",
"1451",
"5758",
"4304",
"4057",
"4060",
"4040",
"5442",
"26",
"4017",
"5258",
"8473",
"525",
"4479",
"3956",
"3928",
"3952",
"6382",
"3941",
"3949",
"3946",
"5017",
"4314",
"2775",
"239",
"2763",
"6788",
"1360",
"656",
"2418",
"2417",
"5360",
"2412",
"1602",
"2416",
"2414",
"2413",
"1115",
"2737",
"2735",
"2736",
"1258",
"4416",
"6052",
"8301",
"1088",
"7816",
"8189",
"2564",
"2288",
"6138",
"6136",
"1520",
"1506",
"1329",
"337",
"7815",
"c_6acbfd25",
"1184",
"6135",
"7879",
"4840",
"7861",
"7855",
"7868",
"7817",
"7814",
"864",
"281",
"5424",
"311",
"3778",
"1087",
"2287",
"1086",
"911",
"912",
"913",
"909",
"914",
"1587",
"1293",
"1615",
"5259",
"1632",
"1297",
"5302",
"4069",
"1844",
"4607",
"2117",
"8365",
"c_2b2e17b1",
"5283",
"5776",
"c_964c9649",
"4690",
"6205",
"c_b2c019c8",
"7207",
"1794",
"4587",
"8369",
"187",
"4845",
"5083",
"5087",
"2148",
"594",
"5778",
"8278",
"c_c385cfb4",
"5780",
"8366",
"5265",
"1847",
"1105",
"1191",
"8367",
"2106",
"2838",
"727",
"5448",
"3791",
"2858",
"3889",
"956",
"2113",
"2705",
"1765",
"2848",
"2119",
"c_6e703074",
"122",
"5775",
"5434",
"2200",
"1658",
"5435",
"130",
"2079",
"5931",
"4590",
"c_789ead37",
"4483",
"5437",
"5655",
"2616",
"4256",
"1800",
"335",
"427",
"84",
"712",
"2402",
"2497",
"637",
"4006",
"3013",
"3659",
"4362",
"56",
"5848",
"2515",
"3058",
"371",
"343",
"2090",
"2524",
"3549",
"3150",
"3146",
"6011",
"833",
"1707",
"434743",
"1158",
"2470",
"c_98d1408a",
"2825",
"5781",
],
"original": [
"100149",
"100160",
"100148",
"100147",
"100152",
"100143",
"100156",
"100158",
"100154",
"100153",
"100142",
"100145",
"100150",
"100162",
"100159",
"100144",
"100151",
"100157",
"100161",
"100146",
"100155",
"100215",
"100211",
"100213",
"100214",
"100209",
"100208",
"100216",
"100210",
"100212",
"100270",
"100273",
"100269",
"100272",
"100276",
"100274",
"100271",
"100268",
"100275",
"100203",
"100201",
"100202",
"100207",
"100205",
"100204",
"100199",
"100206",
"100200",
"100198",
"100192",
"100193",
"100191",
"100196",
"100197",
"100195",
"100194",
"100230",
"100231",
"100229",
"100227",
"100228",
"100226",
"100173",
"100170",
"100172",
"100171",
"100250",
"100245",
"100251",
"100246",
"100248",
"100249",
"100252",
"100247",
"100260",
"100262",
"100261",
"100237",
"100241",
"100238",
"100239",
"100240",
"100234",
"100236",
"100232",
"100235",
"100233",
"100164",
"100166",
"100169",
"100167",
"100165",
"100163",
"100168",
"100178",
"100181",
"100179",
"100184",
"100183",
"100180",
"100182",
"100222",
"100217",
"100219",
"100223",
"100218",
"100220",
"100221",
"100288",
"100285",
"100287",
"100289",
"100286",
"100264",
"100263",
"100265",
"100267",
"100266",
"100177",
"100174",
"100176",
"100175",
"100225",
"100224",
"100256",
"100255",
"100258",
"100253",
"100254",
"100257",
"100259",
"100278",
"100280",
"100284",
"100277",
"100279",
"100282",
"100281",
"100283",
"100190",
"100186",
"100185",
"100188",
"100189",
"100187",
"100244",
"100243",
"100242",
"1754",
"5881",
"c_4ee83dea",
"2479",
"1268",
"181",
"1415",
"2789",
"85",
"3487",
"1820",
"4305",
"2688",
"317",
"3257",
"2459",
"108",
"1004",
"4050",
"1909",
"1916",
"4184",
"3307",
"540",
"965",
"2398",
"4619",
"2496",
"490",
"1690",
"2475",
"1687",
"1221",
"385",
"3300",
"52",
"3318",
"2690",
"4706",
"1700",
"2517",
"1882",
"1417",
"3820",
"4414",
"3879",
"4565",
"3798",
"3809",
"3810",
"1362",
"1401",
"1400",
"3895",
"1398",
"5359",
"1390",
"1386",
"1845",
"1410",
"1834",
"5615",
"1718",
"5614",
"2720",
"2507",
"3587",
"4303",
"1418",
"1388",
"3151",
"1804",
"1630",
"1000",
"1488",
"2489",
"2639",
"1039",
"2656",
"2632",
"929",
"394",
"2698",
"1012",
"2448",
"1018",
"424",
"2510",
"4602",
"2486",
"1809",
"1325",
"1326",
"1321",
"2504",
"1021",
"1310",
"8469",
"285",
"1452",
"2495",
"2149",
"4370",
"1016",
"6332",
"4671",
"3239",
"1850",
"4380",
"3156",
"2520",
"5842",
"2519",
"2292",
"2128",
"614",
"c_4c7717f3",
"2487",
"2449",
"2450",
"1786",
"1810",
"524",
"2488",
"87",
"5268",
"13",
"474",
"4291",
"138",
"3193",
"139",
"3584",
"11",
"3185",
"1155",
"54",
"2474",
"802",
"539",
"3581",
"4413",
"1164",
"1154",
"813",
"5985",
"445",
"4189",
"4703",
"4300",
"69",
"2469",
"3582",
"554",
"3451",
"2628",
"2647",
"3423",
"2464",
"1346",
"3461",
"3474",
"3467",
"2189",
"3476",
"2500",
"4697",
"6722",
"3450",
"99",
"3074",
"2460",
"3489",
"2734",
"2723",
"5877",
"711",
"1277",
"2711",
"2443",
"2442",
"1063",
"651",
"2505",
"2972",
"2477",
"2493",
"4363",
"4412",
"4630",
"2014",
"5962",
"1372",
"2320",
"3544",
"1652",
"5913",
"2476",
"3605",
"2551",
"4358",
"2814",
"937",
"962",
"2707",
"2481",
"2817",
"4185",
"2914",
"2478",
"3797",
"4418",
"3641",
"3632",
"3689",
"4415",
"3160",
"2494",
"3101",
"3131",
"4505",
"3092",
"1954",
"2181",
"2015",
"3098",
"4539",
"4522",
"2512",
"4515",
"2184",
"82",
"4543",
"2178",
"557",
"3209",
"5974",
"1627",
"1074",
"4299",
"2468",
"3522",
"1163",
"2491",
"3273",
"5275",
"1737",
"1802",
"4276",
"2180",
"2258",
"4436",
"41",
"3232",
"2242",
"53",
"2246",
"2166",
"77",
"6034",
"3062",
"517",
"2702",
"2703",
"4157",
"1748",
"695",
"4750",
"558",
"2186",
"4367",
"1953",
"883",
"2370",
"1282",
"2573",
"3025",
"1453",
"2152",
"2001",
"4045",
"4179",
"4704",
"5789",
"3935",
"4222",
"5371",
"2701",
"1538",
"5400",
"3929",
"523",
"584",
"7347",
"3901",
"538",
"3900",
"3908",
"3899",
"6922",
"565",
"567",
"4010",
"566",
"1432",
"3513",
"3878",
"3954",
"573",
"2351",
"5923",
"2472",
"1422",
"4486",
"2467",
"668",
"3683",
"3278",
"4705",
"2175",
"4554",
"1426",
"4488",
"1405",
"3028",
"5922",
"873",
"778",
"2059",
"795",
"784",
"4150",
"494",
"486",
"4366",
"1370",
"4361",
"2473",
"2012",
"3855",
"c_415bab24",
"119",
"2188",
"3716",
"4365",
"4826",
"2179",
"6015",
"1455",
"8380",
"2170",
"5286",
"2190",
"2172",
"2167",
"5285",
"2171",
"6157",
"2191",
"90",
"2187",
"5366",
"2767",
"3743",
"1411",
"2498",
"3737",
"2364",
"1274",
"1414",
"744",
"1412",
"5350",
"3783",
"753",
"5576",
"5051",
"5620",
"438855",
"1005",
"3258",
"4067",
"5640",
"2163",
"5130",
"5767",
"5769",
"1451",
"5758",
"4304",
"4057",
"4060",
"4040",
"5442",
"26",
"4017",
"5258",
"8473",
"525",
"4479",
"3956",
"3928",
"3952",
"6382",
"3941",
"3949",
"3946",
"5017",
"4314",
"2775",
"239",
"2763",
"6788",
"1360",
"656",
"2418",
"2417",
"5360",
"2412",
"1602",
"2416",
"2414",
"2413",
"1115",
"2737",
"2735",
"2736",
"1258",
"4416",
"6052",
"8301",
"1088",
"7816",
"8189",
"2564",
"2288",
"6138",
"6136",
"1520",
"1506",
"1329",
"337",
"7815",
"c_6acbfd25",
"1184",
"6135",
"7879",
"4840",
"7861",
"7855",
"7868",
"7817",
"7814",
"864",
"281",
"5424",
"311",
"3778",
"1087",
"2287",
"1086",
"911",
"912",
"913",
"909",
"914",
"1587",
"1293",
"1615",
"5259",
"1632",
"1297",
"5302",
"4069",
"1844",
"4607",
"2117",
"8365",
"c_2b2e17b1",
"5283",
"5776",
"c_964c9649",
"4690",
"6205",
"c_b2c019c8",
"7207",
"1794",
"4587",
"8369",
"187",
"4845",
"5083",
"5087",
"2148",
"594",
"5778",
"8278",
"c_c385cfb4",
"5780",
"8366",
"5265",
"1847",
"1105",
"1191",
"8367",
"2106",
"2838",
"727",
"5448",
"3791",
"2858",
"3889",
"956",
"2113",
"2705",
"1765",
"2848",
"2119",
"c_6e703074",
"122",
"5775",
"5434",
"2200",
"1658",
"5435",
"130",
"2079",
"5931",
"4590",
"c_789ead37",
"4483",
"5437",
"5655",
"2616",
"4256",
"1800",
"335",
"427",
"84",
"712",
"2402",
"2497",
"637",
"4006",
"3013",
"3659",
"4362",
"56",
"5848",
"2515",
"3058",
"371",
"343",
"2090",
"2524",
"3549",
"3150",
"3146",
"6011",
"833",
"1707",
"434743",
"1158",
"2470",
"c_98d1408a",
"2825",
"5781",
"6152",
"c_28750470",
"5431",
"1758",
"5939",
"5716",
"5719",
"1759",
"1760",
"5717",
"5104",
"1756",
"837",
"5718",
"5714",
"1762",
"1757",
"7354",
"c_cbdf29ef",
"8447",
"c_9b88f778",
"6567",
"5720",
"5715",
"1153",
"4474",
"838",
"1304",
"1859",
"2923",
"1852",
"4455",
"5970",
"5907",
"c_c4afa011",
"c_31da5694",
"5764",
"c_60d3928d",
"5921",
"273",
"3365",
"4116",
"5689",
"2446",
"6569",
"c_4523e1dd",
"1590",
"4849",
"1279",
"1284",
"465",
"2318",
"2785",
"2773",
"3593",
"5821",
"5487",
"3597",
"3594",
"3595",
"2790",
"5785",
"86",
"2083",
"1205",
"1202",
"3486",
"4214",
"7131",
"c_6c4d5118",
"772",
"1353",
"6002",
"1470",
"1420",
"4277",
"2873",
"1951",
"2023",
"2810",
"4837",
"2697",
"3598",
"3601",
"4096",
"1680",
"5667",
"439",
"4097",
"326",
"5171",
"7374",
"5996",
"c_827bea7d",
"4873",
"3296",
"5987",
"c_57946f1a",
"1508",
"7397",
"7387",
"1454",
"c_847fc9f2",
"6561",
"5030",
"3297",
"1536",
"3904",
"6009",
"1244",
"4101",
"6560",
"7371",
"6563",
"3200",
"4089",
"5322",
"442",
"1364",
"2840",
"1780",
"2278",
"2022",
"283",
"6558",
"4105",
"6924",
"102",
"105",
"103",
"4209",
"3357",
"2929",
"4143",
"826",
"3361",
"3358",
"c_16e35fe6",
"6233",
"3360",
"250",
"2605",
"4028",
"6564",
"3359",
"977",
"1914",
"1922",
"2509",
"1302",
"6540",
"1921",
"724",
"1912",
"3322",
"1948",
"259",
"3320",
"1913",
"1911",
"1925",
"1910",
"1917",
"1924",
"1923",
"2065",
"1920",
"2339",
"2238",
"2341",
"5345",
"6538",
"74",
"2334",
"2335",
"3309",
"3311",
"7414",
"719",
"3315",
"6536",
"2099",
"570",
"3314",
"483",
"4581",
"117",
"5296",
"1818",
"3317",
"3316",
"3312",
"7352",
"2239",
"967",
"966",
"5961",
"6535",
"4234",
"5390",
"2501",
"3301",
"5159",
"6539",
"6537",
"3168",
"861",
"3281",
"2391",
"5173",
"6033",
"3124",
"4167",
"3376",
"6035",
"662",
"1805",
"2691",
"c_d59e7560",
"847",
"3377",
"4443",
"4618",
"4442",
"1456",
"4850",
"4058",
"507",
"515",
"1693",
"5553",
"1694",
"1692",
"556",
"1695",
"271",
"5309",
"3378",
"4243",
"2806",
"775",
"2327",
"3134",
"4598",
"4874",
"1223",
"1222",
"1773",
"2029",
"589",
"1991",
"2092",
"2484",
"3460",
"66",
"2559",
"389",
"2558",
"3245",
"3337",
"1864",
"3327",
"5799",
"4852",
"3324",
"405",
"6544",
"4231",
"4343",
"5314",
"442958",
"5325",
"5634",
"4374",
"4728",
"2823",
"4475",
"3352",
"5800",
"374",
"6738",
"6542",
"1764",
"1691",
"4323",
"1162",
"3348",
"3353",
"1817",
"442990",
"5160",
"7195",
"5169",
"6046",
"5170",
"5291",
"101",
"432",
"6549",
"5382",
"2270",
"1990",
"5323",
"3250",
"3329",
"4702",
"3334",
"5903",
"4470",
"4634",
"5166",
"4245",
"4564",
"3179",
"6552",
"4086",
"282",
"3344",
"1231",
"6215",
"3919",
"1469",
"3347",
"1133",
"1708",
"81",
"3346",
"580",
"1340",
"6823",
"1886",
"3837",
"4084",
"1883",
"1885",
"1884",
"3824",
"3831",
"3828",
"3827",
"3826",
"708",
"1945",
"1889",
"1868",
"4228",
"3829",
"2766",
"5289",
"3825",
"1514",
"5503",
"5447",
"6012",
"5678",
"626",
"1826",
"4831",
"154",
"1830",
"3880",
"6916",
"153",
"509",
"1523",
"3882",
"1145",
"510",
"1512",
"4211",
"945",
"3",
"6915",
"2747",
"28",
"212",
"2420",
"1704",
"2372",
"2214",
"1229",
"5290",
"1887",
"c_b9c60592",
"4545",
"1275",
"5261",
"4062",
"1878",
"2213",
"51",
"5035",
"3817",
"2740",
"1143",
"1396",
"6914",
"c_e5d85c14",
"1387",
"2739",
"3808",
"2716",
"3861",
"3859",
"1833",
"1828",
"3864",
"3865",
"3876",
"849",
"2192",
"3874",
"50",
"1720",
"2726",
"1416",
"2706",
"5245",
"1391",
"2745",
"5646",
"1458",
"2755",
"2205",
"3897",
"1384",
"2774",
"1393",
"1867",
"1375",
"2754",
"6825",
"3842",
"1403",
"1413",
"2159",
"2169",
"1357",
"1358",
"5243",
"1361",
"1846",
"3841",
"1371",
"3840",
"155",
"4584",
"2378",
"3759",
"5440",
"5397",
"2549",
"1077",
"3846",
"4579",
"3851",
"1832",
"1994",
"1724",
"1723",
"1725",
"2758",
"2770",
"4224",
"3627",
"451069",
"4264",
"3290",
"3707",
"c_804a9afe",
"3224",
"3617",
"429",
"2802",
"3624",
"2677",
"8450",
"1957",
"1009",
"1356",
"1434",
"2405",
"1435",
"1407",
"1408",
"3619",
"3622",
"1402",
"1377",
"1376",
"5024",
"1726",
"3890",
"2783",
"4377",
"1397",
"4169",
"1368",
"3887",
"315",
"3886",
"3706",
"3477",
"3590",
"165",
"3567",
"3599",
"3630",
"3274",
"3723",
"3243",
"3244",
"3373",
"3187",
"549",
"34",
"3222",
"3673",
"2906",
"3242",
"c_dcf3f7c0",
"1485",
"1790",
"c_3e6af2e7",
"c_8f89faac",
"2150",
"4491",
"c_834b57c4",
"3233",
"4646",
"2787",
"728",
"1459",
"4263",
"4187",
"5156",
"2002",
"2502",
"560",
"1003",
"1007",
"6025",
"1001",
"6714",
"5154",
"1014",
"1002",
"c_10aa91c7",
"c_39d242fc",
"2466",
"1492",
"1489",
"1490",
"1491",
"1497",
"1496",
"1495",
"2463",
"1494",
"1072",
"233",
"1354",
"1025",
"2456",
"1348",
"5155",
"460",
"4235",
"3152",
"4248",
"188",
"2672",
"2662",
"2644",
"2659",
"2676",
"2682",
"3270",
"2649",
"2650",
"2657",
"2661",
"2645",
"2651",
"2643",
"4146",
"4623",
"2635",
"2668",
"2634",
"2653",
"2636",
"2642",
"2640",
"2664",
"2648",
"3272",
"2652",
"2638",
"2685",
"2673",
"3208",
"2689",
"2633",
"2674",
"2666",
"2665",
"2670",
"3268",
"2687",
"2681",
"4329",
"2660",
"2683",
"2679",
"4301",
"933",
"1675",
"296",
"3253",
"341",
"3350",
"173",
"189",
"763",
"402",
"398",
"397",
"403",
"5059",
"5061",
"875",
"917",
"3182",
"144",
"6028",
"2574",
"2615",
"3558",
"368",
"4383",
"3554",
"4282",
"6340",
"843",
"2403",
"5062",
"5972",
"5050",
"4241",
"1824",
"4569",
"4132",
"3119",
"3256",
"5052",
"3254",
"58",
"2319",
"2067",
"1017",
"5054",
"137",
"740",
"1682",
"5021",
"729",
"178",
"4763",
"1075",
"c_2d0e694e",
"c_6cfce4be",
"5797",
"c_c565cff5",
"5795",
"5883",
"5474",
"5555",
"204",
"1799",
"6335",
"4390",
"182",
"3227",
"1676",
"1969",
"2365",
"904",
"1825",
"c_20fde7af",
"1322",
"530",
"1324",
"1328",
"1323",
"1314",
"1312",
"1327",
"4406",
"1330",
"1331",
"4585",
"4409",
"4401",
"1015",
"4399",
"4400",
"4407",
"6343",
"504",
"3298",
"4279",
"935",
"c_c586f36c",
"1985",
"3261",
"365",
"3262",
"6736",
"561",
"1234",
"6342",
"57",
"6029",
"5566",
"170",
"924",
"3685",
"2230",
"3264",
"1316",
"1315",
"1320",
"1319",
"1313",
"1317",
"6713",
"6339",
"290",
"3229",
"286",
"2606",
"295",
"293",
"c_896e199b",
"292",
"297",
"289",
"298",
"8432",
"736",
"4118",
"3246",
"872",
"4738",
"4195",
"278",
"1130",
"3230",
"858",
"2959",
"3231",
"3251",
"4356",
"2151",
"2371",
"3252",
"408",
"3612",
"3994",
"521",
"3248",
"2447",
"1058",
"6055",
"5729",
"973",
"3449",
"3225",
"1677",
"739",
"3186",
"2219",
"423",
"4371",
"665",
"6333",
"4644",
"4633",
"4661",
"4798",
"1466",
"4344",
"4608",
"219",
"2069",
"2387",
"4078",
"4405",
"4793",
"502",
"4079",
"491",
"1069",
"5583",
"3948",
"4645",
"3178",
"4381",
"4402",
"4499",
"1637",
"3174",
"5843",
"4600",
"4836",
"4789",
"1232",
"4333",
"3611",
"3176",
"1091",
"4350",
"161",
"932",
"4385",
"2957",
"2050",
"4885",
"4025",
"2317",
"4186",
"946",
"4748",
"613",
"622",
"1309",
"3189",
"4643",
"4573",
"c_34465dac",
"c_163e1e96",
"480",
"3166",
"1010",
"5542",
"2912",
"1486",
"4",
"3123",
"635",
"2804",
"5707",
"3172",
"9",
"10",
"692",
"6364",
"2590",
"3165",
"3170",
"2182",
"2967",
"927",
"619",
"4605",
"618",
"3171",
"4073",
"1807",
"1795",
"1803",
"1806",
"1801",
"4219",
"20",
"1797",
"5725",
"2218",
"1796",
"6365",
"1798",
"6366",
"522",
"6062",
"3570",
"3167",
"6268",
"288",
"3271",
"2742",
"720",
"2589",
"893",
"2756",
"722",
"4663",
"5710",
"1688",
"2602",
"4983",
"2862",
"3173",
"3204",
"2803",
"2081",
"895",
"1423",
"6371",
"3205",
"1562",
"4104",
"3207",
"5451",
"2786",
"3218",
"1078",
"5",
"6010",
"1055",
"2045",
"1684",
"6",
"5140",
"4280",
"47",
"6216",
"2836",
"4880",
"4878",
"3195",
"c_a62dbeba",
"5317",
"4099",
"3198",
"4879",
"3197",
"140",
"4198",
"147",
"143",
"146",
"3202",
"377",
"393",
"396",
"1135",
"3201",
"4208",
"145",
"809",
"3579",
"810",
"3577",
"6081",
"2771",
"8",
"2748",
"621",
"2781",
"4130",
"1616",
"6367",
"1337",
"5151",
"620",
"482",
"1567",
"2851",
"4524",
"4828",
"2452",
"2275",
"6173",
"4817",
"1791",
"2864",
"6013",
"2216",
"957",
"4675",
"1333",
"5092",
"5964",
"55",
"60",
"61",
"3602",
"3446",
"75",
"1635",
"435330",
"805",
"804",
"803",
"3006",
"4574",
"806",
"1592",
"183",
"1591",
"3145",
"2696",
"5541",
"1979",
"2565",
"2646",
"2863",
"2998",
"4242",
"2993",
"2100",
"588",
"6134",
"2135",
"2134",
"174",
"c_03f9a8ac",
"2165",
"4360",
"1165",
"5480",
"1782",
"4855",
"1811",
"3443",
"2629",
"1161",
"1156",
"6064",
"c_406ad4cc",
"7926",
"c_5b447e3a",
"2792",
"6910",
"4286",
"814",
"830",
"829",
"3005",
"820",
"819",
"824",
"6754",
"828",
"816",
"821",
"815",
"822",
"5206",
"827",
"817",
"823",
"2359",
"1178",
"1247",
"4193",
"227",
"4202",
"874",
"4201",
"4191",
"4194",
"1200",
"3007",
"3060",
"4204",
"6749",
"6748",
"6174",
"6753",
"5374",
"1672",
"5372",
"2889",
"150",
"7201",
"115",
"3121",
"3543",
"447",
"871",
"3542",
"3004",
"1122",
"2129",
"110",
"1193",
"2264",
"c_5f90006e",
"7948",
"6752",
"45",
"1956",
"7942",
"4492",
"7956",
"c_5a7a0d82",
"6751",
"5205",
"1307",
"644",
"215",
"733",
"3583",
"5081",
"2831",
"7",
"1365",
"2303",
"4292",
"3452",
"3458",
"5610",
"5786",
"3439",
"3442",
"5591",
"3437",
"5783",
"3441",
"7809",
"3440",
"3436",
"4127",
"2637",
"5321",
"3431",
"3420",
"1944",
"4417",
"2567",
"6363",
"3429",
"3432",
"7987",
"c_9104c45f",
"4156",
"4822",
"2905",
"6380",
"3447",
"6379",
"2499",
"472",
"5053",
"3448",
"7340",
"2005",
"3488",
"4596",
"1347",
"359",
"5121",
"1345",
"4216",
"3463",
"3466",
"3465",
"5420",
"2901",
"5870",
"5537",
"2095",
"1379",
"5833",
"6007",
"2928",
"2850",
"5574",
"3462",
"1663",
"3219",
"2087",
"3223",
"3480",
"5744",
"3241",
"3479",
"3228",
"3196",
"c_39046afb",
"3870",
"3481",
"5124",
"3650",
"3206",
"3483",
"435",
"3468",
"3473",
"3482",
"3470",
"5486",
"2926",
"5638",
"6704",
"6372",
"5449",
"5126",
"c_99e6dc30",
"4275",
"4270",
"3500",
"2986",
"5831",
"8355",
"3502",
"8357",
"3501",
"c_e78f03db",
"8354",
"4269",
"8356",
"3499",
"c_7d7608fa",
"c_ff638149",
"2053",
"1306",
"1517",
"2071",
"3498",
"406",
"1869",
"4696",
"1743",
"5773",
"2485",
"6024",
"c_959c23de",
"3496",
"3112",
"6117",
"1930",
"5753",
"5957",
"3505",
"3506",
"3504",
"3453",
"6927",
"411",
"440",
"6375",
"5335",
"c_47ea173d",
"498",
"3495",
"c_54f7dd1f",
"1214",
"c_7cbc24fd",
"1436",
"541",
"1210",
"3493",
"3076",
"5125",
"3075",
"c_f7430876",
"c_50620749",
"807",
"3077",
"899",
"737",
"4055",
"881",
"5337",
"949",
"845",
"969",
"922",
"3003",
"910",
"986",
"500",
"2909",
"4221",
"207",
"6373",
"224",
"210",
"208",
"226",
"5551",
"220",
"5546",
"2285",
"213",
"435164",
"6917",
"2300",
"223",
"c_8f1cd55b",
"5545",
"4121",
"217",
"4123",
"216",
"221",
"2298",
"6920",
"218",
"211",
"209",
"5779",
"3491",
"225",
"4225",
"1763",
"324",
"2625",
"2131",
"1031",
"3409",
"322",
"330",
"2396",
"7133",
"323",
"6321",
"2025",
"320",
"5792",
"4734",
"329",
"6320",
"318",
"1260",
"5462",
"1755",
"1856",
"768",
"4095",
"4251",
"3408",
"2115",
"715",
"721",
"4427",
"6322",
"713",
"1224",
"4163",
"1898",
"1585",
"1167",
"1937",
"1929",
"4743",
"1919",
"2989",
"2511",
"8459",
"c_8b0ac3e2",
"2173",
"2971",
"2969",
"2444",
"2965",
"1595",
"2970",
"2445",
"6100",
"2915",
"4117",
"638",
"6311",
"6308",
"1849",
"579",
"1788",
"1598",
"7204",
"2404",
"6328",
"6323",
"6329",
"5316",
"1064",
"1066",
"8437",
"1068",
"1067",
"1065",
"2992",
"2994",
"943",
"6331",
"563",
"2995",
"4352",
"5219",
"4336",
"2920",
"6326",
"1513",
"2893",
"926",
"2194",
"2975",
"2977",
"2974",
"2892",
"4404",
"2622",
"2624",
"2068",
"3576",
"1686",
"1201",
"2976",
"1144",
"2938",
"5096",
"1958",
"4298",
"4771",
"4312",
"3265",
"5373",
"1342",
"222",
"319",
"6317",
"2419",
"568",
"2544",
"327",
"328",
"636",
"4142",
"6318",
"624",
"2536",
"2596",
"370",
"4490",
"1521",
"2842",
"1483",
"2925",
"2410",
"832",
"325",
"4444",
"4338",
"4708",
"4450",
"4448",
"4445",
"4447",
"4674",
"152",
"764",
"2409",
"4081",
"846",
"c_39cda9ba",
"2985",
"2560",
"842",
"2211",
"5034",
"5252",
"6918",
"4612",
"2718",
"2310",
"c_cb1ce6ff",
"4260",
"2307",
"2311",
"2384",
"3000",
"2999",
"714",
"2435",
"313",
"2395",
"1961",
"2411",
"2305",
"2308",
"2561",
"1035",
"2921",
"4790",
"1159",
"2879",
"2556",
"5255",
"2455",
"c_8702d5f7",
"5228",
"3679",
"3682",
"3424",
"2725",
"4257",
"5906",
"936",
"1738",
"2981",
"2980",
"1525",
"577",
"6314",
"2983",
"939",
"559",
"942",
"615",
"1947",
"2655",
"2978",
"3009",
"4811",
"5226",
"983",
"944",
"4337",
"938",
"586",
"963",
"1890",
"964",
"982",
"3634",
"2721",
"2713",
"2722",
"4285",
"6292",
"1465",
"6294",
"4863",
"c_b18ab65c",
"479",
"4239",
"2399",
"2177",
"5147",
"3304",
"3633",
"5123",
"c_ab46334d",
"529",
"2816",
"5238",
"1487",
"3669",
"6302",
"3672",
"1439",
"1894",
"3670",
"2922",
"c_de4d6f2f",
"6305",
"3668",
"3671",
"6306",
"2916",
"3667",
"2924",
"5237",
"c_fea6771b",
"5236",
"3657",
"3654",
"3660",
"3586",
"2795",
"c_7bc27da2",
"3652",
"3655",
"2797",
"3656",
"c_99a79cea",
"2801",
"4890",
"4930",
"3795",
"3796",
"4111",
"3800",
"3801",
"6361",
"3640",
"3803",
"2076",
"3645",
"3649",
"4082",
"866",
"3997",
"71",
"179",
"1848",
"4253",
"3744",
"180",
"3695",
"3694",
"3691",
"3696",
"3698",
"3697",
"3692",
"3693",
"6299",
"3909",
"3651",
"923",
"3636",
"7219",
"5383",
"5928",
"5973",
"4496",
"7931",
"6362",
"3108",
"3117",
"3110",
"1645",
"4464",
"3111",
"4318",
"3115",
"3116",
"2457",
"1433",
"5295",
"5908",
"555",
"3109",
"4536",
"2400",
"5212",
"6850",
"648",
"4033",
"4379",
"4387",
"4805",
"5324",
"3100",
"6020",
"2325",
"3143",
"4162",
"172",
"6021",
"3107",
"4408",
"195",
"c_c8c80a5b",
"2039",
"c_ea3f5ed2",
"5471",
"3125",
"1033",
"5239",
"5216",
"5677",
"3093",
"4527",
"4510",
"4528",
"4537",
"4506",
"4516",
"4530",
"6851",
"2350",
"5152",
"3428",
"4529",
"4152",
"818",
"4535",
"3114",
"3113",
"c_4c441ea0",
"3120",
"4531",
"4511",
"4509",
"4541",
"3097",
"6848",
"4651",
"4526",
"4532",
"3614",
"4517",
"4518",
"4508",
"4519",
"4521",
"4540",
"4514",
"4533",
"4827",
"569",
"1037",
"5211",
"3130",
"5077",
"2041",
"3129",
"2513",
"3105",
"6911",
"1447",
"2040",
"1036",
"1032",
"3435",
"5210",
"2571",
"896",
"4550",
"1503",
"5208",
"592",
"4546",
"1653",
"1926",
"c_e4f135ba",
"4039",
"c_2efea99f",
"855",
"3574",
"4252",
"2161",
"238",
"3566",
"2082",
"533",
"6022",
"4000",
"3564",
"4887",
"1443",
"194",
"5587",
"2046",
"98",
"3572",
"2266",
"3374",
"5982",
"5975",
"6344",
"5983",
"5981",
"3515",
"5980",
"5979",
"5976",
"5977",
"5978",
"2284",
"3534",
"104",
"7212",
"c_090e8e94",
"3532",
"6345",
"7191",
"1073",
"7209",
"7210",
"6008",
"4552",
"5940",
"3529",
"3530",
"4549",
"3525",
"506",
"4547",
"2904",
"3519",
"2427",
"4029",
"4700",
"8549",
"731",
"3523",
"2330",
"1742",
"3518",
"2454",
"545",
"284",
"1646",
"5329",
"4332",
"3626",
"3623",
"2267",
"4090",
"4273",
"2342",
"2943",
"3562",
"5475",
"166",
"414",
"3559",
"3849",
"3850",
"3555",
"3653",
"687",
"1866",
"1026",
"3553",
"3556",
"3688",
"3557",
"3664",
"3843",
"2619",
"481",
"732",
"5080",
"2207",
"4563",
"6745",
"3540",
"c_95b02746",
"4555",
"4557",
"6354",
"c_9eb9cbaa",
"4562",
"4559",
"2868",
"2555",
"2550",
"1529",
"4556",
"3539",
"1739",
"1740",
"4561",
"4558",
"735",
"4054",
"5207",
"372",
"3537",
"1946",
"5632",
"6348",
"4113",
"4851",
"2593",
"3325",
"5755",
"c_a9a17fe5",
"2126",
"6350",
"376",
"6717",
"3854",
"3545",
"2788",
"2784",
"1011",
"1928",
"149",
"2422",
"5069",
"6014",
"244",
"3964",
"7343",
"6888",
"2261",
"2259",
"6886",
"7378",
"2255",
"7377",
"2262",
"2257",
"3050",
"2254",
"2253",
"2260",
"2251",
"2250",
"2252",
"7376",
"955",
"954",
"1137",
"582",
"5357",
"190",
"1599",
"5867",
"5549",
"7337",
"2245",
"2244",
"5068",
"18",
"5379",
"1197",
"2072",
"7952",
"3040",
"1303",
"1336",
"3680",
"1344",
"1778",
"3049",
"4328",
"7217",
"4120",
"362",
"2968",
"1046",
"340",
"5450",
"3088",
"4272",
"361",
"206",
"4136",
"6894",
"68",
"442917",
"5118",
"8466",
"5120",
"38",
"7934",
"5945",
"5852",
"c_46f6aaeb",
"878",
"c_9829e6b7",
"390",
"2296",
"7353",
"65",
"5119",
"5502",
"5501",
"2854",
"5657",
"27",
"391",
"4451",
"3031",
"1172",
"39",
"6897",
"4011",
"7365",
"2700",
"3862",
"5536",
"2695",
"3038",
"5217",
"4012",
"35",
"1438",
"3703",
"2861",
"1558",
"4449",
"6892",
"4119",
"4754",
"4762",
"1062",
"4091",
"3984",
"4147",
"5851",
"3313",
"4159",
"1750",
"1753",
"1746",
"4593",
"697",
"700",
"704",
"5607",
"699",
"696",
"701",
"5067",
"4759",
"4760",
"4761",
"3035",
"7215",
"5064",
"1471",
"360",
"5984",
"4766",
"2268",
"4765",
"5105",
"363",
"706",
"5097",
"4322",
"702",
"2966",
"6890",
"4368",
"3044",
"698",
"3982",
"3977",
"3981",
"3383",
"6882",
"6881",
"5622",
"5626",
"6885",
"5621",
"887",
"5625",
"5623",
"7197",
"4093",
"381",
"1588",
"1601",
"7216",
"1829",
"907",
"3096",
"1967",
"7413",
"2033",
"1478",
"4188",
"971",
"c_3a2df4eb",
"1785",
"5875",
"1138",
"2030",
"1823",
"6862",
"1479",
"135",
"633",
"1614",
"435136",
"32",
"905",
"3048",
"2583",
"3046",
"5114",
"2576",
"2578",
"2581",
"2582",
"2580",
"2579",
"2577",
"382",
"5483",
"6883",
"3002",
"7386",
"1977",
"3016",
"2020",
"4950",
"3020",
"1174",
"3413",
"1966",
"1173",
"436",
"2263",
"3042",
"2588",
"5805",
"6884",
"5669",
"5891",
"2009",
"1999",
"1751",
"1931",
"2000",
"2007",
"1198",
"419",
"2154",
"7373",
"2003",
"2011",
"2073",
"7416",
"470",
"3056",
"8428",
"2010",
"2004",
"4153",
"4729",
"c_57f3c49f",
"4730",
"2162",
"4087",
"c_2f00dd5a",
"c_af502da0",
"3057",
"4180",
"3561",
"4182",
"3226",
"5313",
"7184",
"4181",
"1749",
"1998",
"3517",
"49",
"5897",
"1192",
"6893",
"7189",
"309",
"4959",
"c_9eea2203",
"7385",
"3589",
"8419",
"3379",
"7187",
"5841",
"2155",
"3054",
"1437",
"3237",
"67",
"6017",
"7370",
"7390",
"7399",
"3922",
"578",
"527",
"576",
"1484",
"7350",
"6729",
"6611",
"6257",
"1589",
"c_7afb6cd4",
"1550",
"6609",
"528",
"148",
"5813",
"6023",
"485",
"5836",
"5415",
"5414",
"5835",
"c_0f3b8370",
"7929",
"435469",
"5817",
"2913",
"2704",
"2699",
"3974",
"6617",
"3966",
"c_6496f5ea",
"6619",
"6618",
"5809",
"8458",
"930",
"5954",
"1080",
"5616",
"3969",
"7200",
"5596",
"2819",
"3936",
"2031",
"2818",
"234",
"948",
"4503",
"3938",
"3937",
"4625",
"546",
"2249",
"2813",
"5093",
"3932",
"2812",
"2815",
"3497",
"186",
"96",
"4274",
"164",
"5598",
"2833",
"564",
"3902",
"3915",
"1626",
"520",
"3917",
"2828",
"857",
"7348",
"3916",
"c_ba4acdb2",
"585",
"3911",
"3906",
"1620",
"1636",
"1628",
"1619",
"1625",
"583",
"543",
"1621",
"1618",
"455",
"454",
"458",
"3912",
"456",
"5592",
"3910",
"459",
"2834",
"6254",
"6230",
"6231",
"457",
"688",
"516",
"6628",
"3914",
"7411",
"689",
"512",
"513",
"3913",
"514",
"3565",
"435196",
"4009",
"8457",
"3503",
"518",
"4334",
"8456",
"542",
"8411",
"1633",
"551",
"581",
"4238",
"537",
"6923",
"2034",
"552",
"2035",
"3521",
"488",
"7944",
"8417",
"245",
"5571",
"574",
"8420",
"3355",
"6612",
"3943",
"3944",
"8449",
"3993",
"5489",
"6615",
"6614",
"5142",
"5490",
"4695",
"5631",
"2314",
"3959",
"5271",
"3958",
"2631",
"3957",
"3588",
"1549",
"3953",
"3962",
"3963",
"2353",
"3955",
"877",
"3961",
"575",
"7349",
"1537",
"3990",
"2441",
"2124",
"1735",
"1938",
"5141",
"1543",
"2982",
"1539",
"5692",
"42",
"1541",
"5691",
"6623",
"6621",
"c_bf1e2c81",
"6140",
"c_5ea6e5c4",
"5351",
"5196",
"1367",
"453",
"3422",
"3404",
"156",
"3585",
"4330",
"616",
"4359",
"5422",
"4220",
"1482",
"3030",
"5200",
"6909",
"4441",
"6120",
"4501",
"4478",
"4072",
"671",
"6763",
"5905",
"5178",
"269",
"5177",
"1597",
"280",
"590",
"3968",
"5176",
"5175",
"1419",
"854",
"2397",
"384",
"2932",
"674",
"673",
"677",
"675",
"669",
"2313",
"781",
"676",
"779",
"3279",
"c_243b7be2",
"1784",
"4534",
"7341",
"1098",
"1444",
"1081",
"1950",
"7388",
"3216",
"3280",
"48",
"1380",
"1927",
"5266",
"4357",
"2794",
"2196",
"684",
"5094",
"2973",
"2195",
"373",
"3615",
"5102",
"3606",
"184",
"446",
"6037",
"925",
"3177",
"6261",
"3607",
"6275",
"6908",
"6262",
"5333",
"4430",
"3026",
"6019",
"5181",
"6030",
"3024",
"4821",
"5334",
"5595",
"773",
"1696",
"5613",
"3027",
"5949",
"c_04ae3ba8",
"5684",
"5355",
"5441",
"5862",
"628",
"631",
"5687",
"629",
"627",
"782",
"789",
"794",
"787",
"796",
"785",
"679",
"793",
"799",
"792",
"800",
"1071",
"790",
"801",
"1070",
"780",
"3277",
"788",
"682",
"3276",
"5603",
"678",
"783",
"672",
"798",
"670",
"680",
"681",
"791",
"2918",
"1079",
"c_40f54e0c",
"6188",
"774",
"441",
"1752",
"5109",
"8470",
"4295",
"438",
"4456",
"2859",
"1076",
"4137",
"5627",
"2891",
"5311",
"5628",
"7364",
"1902",
"1962",
"6765",
"7368",
"4144",
"1681",
"7363",
"487",
"5277",
"7379",
"5665",
"4905",
"c_cdec6719",
"6767",
"3930",
"5340",
"c_25ddd844",
"1978",
"452",
"4168",
"492",
"7369",
"4139",
"6766",
"242",
"31",
"5356",
"4626",
"5066",
"2492",
"451",
"6769",
"7398",
"4865",
"1705",
"1629",
"1385",
"1366",
"3580",
"1392",
"5184",
"667",
"1363",
"5188",
"2566",
"c_b12a760a",
"6088",
"4424",
"3255",
"5793",
"8418",
"5191",
"175",
"29",
"2013",
"2185",
"3873",
"2176",
"3856",
"2125",
"3869",
"c_a02d5941",
"2392",
"c_764bcd4e",
"25",
"5765",
"6163",
"3675",
"2026",
"5688",
"c_d3334ae3",
"5495",
"2199",
"2105",
"5479",
"7193",
"5991",
"1194",
"6129",
"c_9fe65404",
"4604",
"2147",
"1034",
"3514",
"5044",
"2070",
"5086",
"7202",
"5656",
"c_7bf10a38",
"6114",
"1045",
"3710",
"6083",
"2432",
"6016",
"1022",
"7933",
"2791",
"5250",
"8393",
"2431",
"4614",
"4002",
"258",
"3709",
"2885",
"124",
"123",
"8384",
"2145",
"1043",
"2093",
"2144",
"2142",
"2143",
"968",
"4616",
"4595",
"4591",
"992",
"7203",
"2130",
"450983",
"4812",
"2141",
"5249",
"4594",
"8381",
"1507",
"8382",
"4611",
"8383",
"2118",
"c_acf7832d",
"5264",
"6338",
"7220",
"5430",
"7415",
"4064",
"2116",
"3676",
"7211",
"6116",
"c_5bad9438",
"5738",
"4872",
"1094",
"1190",
"c_ce38fff4",
"3725",
"8402",
"4615",
"2094",
"6336",
"1838",
"5279",
"5446",
"120",
"8398",
"2103",
"2202",
"4586",
"5107",
"6159",
"8399",
"5341",
"4744",
"8400",
"8403",
"3722",
"2102",
"5530",
"2137",
"2146",
"8370",
"1657",
"987",
"1044",
"1664",
"8404",
"312",
"5282",
"c_2f64fd66",
"991",
"1510",
"3853",
"125",
"6207",
"906",
"5365",
"3720",
"2183",
"8488",
"6091",
"5557",
"5460",
"5248",
"6160",
"7186",
"3718",
"4597",
"3715",
"4588",
"4859",
"2123",
"7185",
"121",
"129",
"c_90ad7755",
"5284",
"176",
"421",
"2829",
"8396",
"5679",
"450898",
"4844",
"2381",
"1935",
"3756",
"3757",
"3754",
"3746",
"5386",
"5387",
"3745",
"5260",
"4489",
"3750",
"3753",
"3747",
"2821",
"3749",
"5861",
"6697",
"c_22f8c0e6",
"4631",
"2917",
"3785",
"642",
"534",
"4507",
"6044",
"4293",
"308",
"c_d8d35f08",
"475",
"3735",
"6079",
"1685",
"4886",
"5201",
"3736",
"746",
"203",
"4420",
"6043",
"3741",
"2772",
"1141",
"2366",
"3734",
"3733",
"3764",
"3765",
"1061",
"3766",
"3767",
"4423",
"6702",
"c_eb46b396",
"749",
"4900",
"750",
"756",
"757",
"748",
"4892",
"754",
"4042",
"c_e0e4608a",
"4422",
"3731",
"2903",
"3782",
"751",
"3786",
"747",
"6047",
"1840",
"5577",
"5579",
"400",
"401",
"5057",
"5452",
"5825",
"5590",
"5158",
"5554",
"c_34746c6e",
"5844",
"1460",
"3560",
"2592",
"3259",
"1311",
"5854",
"8465",
"5937",
"5519",
"5421",
"7927",
"2140",
"447472",
"5508",
"6054",
"4838",
"5645",
"5680",
"4071",
"5251",
"1048",
"c_871b5612",
"3717",
"7938",
"6128",
"8284",
"2164",
"5426",
"2153",
"c_047e5912",
"6049",
"441001",
"448275",
"c_edee9606",
"6219",
"7126",
"6072",
"447917",
"c_f5ed5adb",
"7985",
"7937",
"6076",
"7928",
"7221",
"5310",
"5354",
"447756",
"5641",
"6256",
"7936",
"c_8a658bb0",
"447958",
"5427",
"5757",
"7222",
"8439",
"c_4768a12e",
"7983",
"2198",
"5425",
"5642",
"447795",
"447660",
"c_96124aaf",
"6255",
"c_bbc13d07",
"4046",
"5342",
"7984",
"4038",
"2243",
"128",
"256",
"255",
"5455",
"114",
"4177",
"5762",
"4175",
"5885",
"c_8a6f744c",
"2324",
"12",
"4063",
"6222",
"5544",
"5547",
"5788",
"1474",
"442821",
"5763",
"5873",
"5409",
"5407",
"7932",
"4183",
"5629",
"5499",
"5948",
"3234",
"5418",
"1464",
"4070",
"3299",
"c_59f9c651",
"6274",
"5344",
"5343",
"5327",
"40",
"4125",
"4030",
"43",
"36",
"5127",
"7930",
"2927",
"3102",
"440549",
"440523",
"449",
"3484",
"4014",
"4015",
"4020",
"4022",
"6284",
"349",
"4457",
"8474",
"7194",
"4066",
"4065",
"5606",
"5534",
"5498",
"5411",
"2897",
"4018",
"1551",
"2247",
"4482",
"c_777eba81",
"4032",
"5871",
"8553",
"4068",
"443137",
"4035",
"7127",
"4481",
"4480",
"6142",
"4031",
"6725",
"4034",
"6726",
"2369",
"1245",
"4205",
"892",
"3923",
"2453",
"1670",
"3933",
"3947",
"307",
"4094",
"2856",
"1151",
"c_a935cf3f",
"2114",
"1278",
"1148",
"6715",
"3931",
"6390",
"1892",
"6383",
"3790",
"632",
"2367",
"4921",
"2232",
"685",
"710",
"1896",
"3293",
"5968",
"3925",
"1835",
"709",
"4110",
"4109",
"4108",
"5966",
"4107",
"5301",
"2376",
"3292",
"3291",
"5287",
"5018",
"5019",
"4315",
"4317",
"3793",
"1170",
"4316",
"1563",
"4319",
"1511",
"2847",
"2845",
"2846",
"2849",
"1176",
"1814",
"1175",
"240",
"241",
"2958",
"1565",
"4227",
"1568",
"4860",
"1571",
"5073",
"1836",
"1102",
"301",
"1573",
"303",
"1943",
"4984",
"1406",
"1394",
"3563",
"1374",
"2277",
"1271",
"136",
"5031",
"1113",
"1378",
"93",
"1395",
"5967",
"2710",
"666",
"5078",
"4454",
"5088",
"5384",
"252",
"1667",
"1556",
"1554",
"4173",
"4059",
"5363",
"5361",
"4497",
"2193",
"4994",
"1891",
"4215",
"1932",
"1744",
"4571",
"3732",
"3898",
"1714",
"4566",
"1607",
"1609",
"1603",
"1608",
"5231",
"1605",
"1604",
"4858",
"4857",
"1781",
"1285",
"2548",
"6042",
"2136",
"3724",
"4145",
"1668",
"4472",
"2064",
"2345",
"4210",
"4373",
"2950",
"2948",
"1261",
"5106",
"1117",
"1120",
"1118",
"1119",
"693",
"4582",
"1116",
"2888",
"4207",
"4681",
"1907",
"5728",
"2121",
"5263",
"1564",
"2302",
"c_60d786af",
"387",
"5232",
"6790",
"1654",
"2732",
"2749",
"6789",
"6787",
"2730",
"4311",
"2738",
"2731",
"79",
"1250",
"2212",
"2733",
"2729",
"2753",
"1240",
"2776",
"2762",
"c_16b63d4f",
"2757",
"2779",
"2741",
"2764",
"2782",
"2760",
"2777",
"2962",
"4083",
"2752",
"2743",
"473",
"2226",
"386",
"4726",
"2887",
"4294",
"1857",
"132",
"4498",
"6039",
"2963",
"741",
"683",
"2133",
"2658",
"463",
"8311",
"8332",
"8324",
"8328",
"8304",
"8317",
"1288",
"c_8331ae25",
"5352",
"c_791e9f27",
"246",
"2293",
"c_bbd07d89",
"c_be8fda35",
"c_f2f1fb47",
"c_0a700c19",
"2853",
"c_c44f1704",
"5041",
"7842",
"7847",
"7850",
"7849",
"7846",
"7845",
"7841",
"7840",
"7844",
"7853",
"7843",
"7848",
"7851",
"7852",
"8491",
"8490",
"8494",
"8496",
"8497",
"8493",
"8492",
"8495",
"3395",
"5076",
"3394",
"1730",
"3393",
"1177",
"1187",
"1209",
"2210",
"5037",
"4781",
"1101",
"5637",
"1248",
"1249",
"1660",
"1189",
"4632",
"511",
"6237",
"6242",
"6246",
"6238",
"6239",
"6245",
"6240",
"6241",
"6198",
"6200",
"6197",
"6202",
"6194",
"6201",
"6199",
"6195",
"6196",
"3891",
"1586",
"1701",
"4823",
"2570",
"1100",
"4660",
"4568",
"5091",
"4640",
"1659",
"5098",
"4155",
"1960",
"2139",
"2379",
"1813",
"4468",
"5075",
"725",
"5947",
"2021",
"113",
"1610",
"1230",
"5932",
"4825",
"5470",
"1239",
"3711",
"5880",
"5730",
"3918",
"3713",
"3903",
"4847",
"4835",
"4560",
"5879",
"5039",
"3726",
"8280",
"4693",
"1",
"4678",
"4659",
"2074",
"5027",
"8279",
"3719",
"1096",
"1020",
"8282",
"4297",
"1097",
"8283",
"3752",
"232",
"4296",
"4206",
"8281",
"7828",
"7829",
"7827",
"7821",
"7822",
"7818",
"7825",
"7826",
"7824",
"7819",
"7823",
"7820",
"c_de77a2c0",
"c_d4ca9a6c",
"c_4b33c289",
"c_e6d358a0",
"c_46e54685",
"c_c5c9cfc7",
"c_d3974887",
"7889",
"4458",
"1183",
"1295",
"4171",
"334",
"839",
"2315",
"7890",
"495",
"1717",
"305",
"4459",
"4244",
"7881",
"6251",
"6248",
"6247",
"6250",
"7880",
"6249",
"7904",
"7899",
"7901",
"7902",
"7905",
"7903",
"7900",
"5517",
"3039",
"3084",
"3061",
"7862",
"7864",
"7866",
"7867",
"7865",
"7858",
"7860",
"7857",
"7859",
"7856",
"7877",
"7878",
"7870",
"7873",
"7869",
"7871",
"7872",
"7875",
"7876",
"7874",
"7830",
"7839",
"7834",
"7838",
"7836",
"7854",
"7837",
"7835",
"7965",
"7979",
"7972",
"7976",
"7967",
"7970",
"7968",
"7977",
"7971",
"7978",
"7966",
"7973",
"7974",
"7980",
"7975",
"7969",
"2289",
"3384",
"3388",
"1127",
"6282",
"6283",
"2354",
"3385",
"3391",
"4172",
"3392",
"869",
"3382",
"1873",
"5273",
"3390",
"3822",
"3387",
"2038",
"3386",
"5423",
"291",
"332",
"302",
"342",
"357",
"3804",
"351",
"3799",
"3407",
"8371",
"8374",
"5774",
"4649",
"3381",
"1290",
"7208",
"1180",
"2362",
"1606",
"1624",
"261",
"1716",
"236",
"1903",
"2546",
"1862",
"6279",
"4776",
"1727",
"5777",
"4778",
"2291",
"2612",
"2613",
"2614",
"2822",
"4258",
"4266",
"1895",
"2907",
"2902",
"4265",
"3326",
"4268",
"2066",
"4267",
"4213",
"4262",
"4302",
"3085",
"1901",
"770",
"3086",
"1728",
"6786",
"1352",
"1731",
"634",
"3400",
"4250",
"1041",
"1040",
"2910",
"1038",
"1421",
"4254",
"3080",
"430",
"310",
"6143",
"2299",
"5148",
"2295",
"2933",
"358",
"2569",
"2297",
"6781",
"4223",
"461",
"1472",
"274",
"5146",
"3533",
"5992",
"3071",
"3068",
"835",
"889",
"723",
"862",
"3069",
"3070",
"976",
"745",
"797",
"786",
"766",
"8460",
"995",
"2407",
"2393",
"2408",
"2406",
"1409",
"4077",
"369",
"202",
"2490",
"2461",
"c_1138d9d2",
"2518",
"410",
"1463",
"3066",
"2465",
"2458",
"2844",
"2462",
"3194",
"653",
"660",
"658",
"661",
"c_87ccc7c3",
"652",
"3082",
"659",
"647",
"643",
"639",
"7951",
"655",
"650",
"4008",
"4016",
"4021",
"4013",
"6783",
"6032",
"654",
"1792",
"646",
"c_68ddcc11",
"5840",
"649",
"4601",
"c_8bf65f26",
"657",
"640",
"5564",
"2062",
"5056",
"3684",
"834",
"6785",
"3319",
"3072",
"2575",
"836",
"4372",
"3328",
"3083",
"2778",
"59",
"3333",
"536",
"3073",
"5845",
"5457",
"c_a6b5bad0",
"c_6eb6fb87",
"5443",
"c_12742da8",
"431",
"1542",
"5433",
"462",
"6782",
"2516",
"c_c8363c09",
"1468",
"3067",
"3079",
"4129",
"3052",
"5846",
"3047",
"3064",
"4128",
"3078",
"3081",
"3043",
"243",
"412",
"420",
"3157",
"5482",
"4802",
"413",
"416",
"4165",
"5958",
"1335",
"4800",
"339",
"6410",
"5336",
"6412",
"6411",
"5294",
"1269",
"6103",
"c_fa0acac6",
"344",
"6408",
"6409",
"347",
"c_2b70515a",
"345",
"612",
"4141",
"346",
"3188",
"6720",
"5048",
"2521",
"4308",
"5319",
"c_be0de7b7",
"2530",
"6413",
"2527",
"2539",
"2542",
"3159",
"3161",
"2531",
"2534",
"2541",
"2540",
"3164",
"6407",
"2532",
"3163",
"6414",
"2538",
"3162",
"2537",
"2526",
"2528",
"3531",
"3536",
"3153",
"3538",
"3546",
"3551",
"3535",
"3528",
"3687",
"972",
"865",
"1030",
"3149",
"3154",
"260",
"1987",
"1870",
"4785",
"2389",
"3414",
"2107",
"c_93670e37",
"415",
"1287",
"5697",
"4786",
"89",
"3967",
"4940",
"664",
"5741",
"844",
"3136",
"2611",
"177",
"2824",
"2626",
"2877",
"6400",
"434843",
"434786",
"434966",
"434938",
"434909",
"2947",
"3133",
"718",
"c_749f2ce9",
"2869",
"535",
"c_bb1a60eb",
"3139",
"6396",
"1157",
"5682",
"8550",
"6397",
"4074",
"3140",
"5794",
"2919",
"1160",
"1709",
"c_165899a6",
"c_d8ba2fe4",
"3144",
"3141",
"2841",
"5225",
"4801",
"3142",
"134",
"2826",
"1706",
"2827",
"6398",
"6131",
"2201",
"8300",
"6148",
"6161",
"5370",
"168",
"6252",
"690",
"1298",
"8373",
"1168",
"1291",
"4848",
"2547",
"3823",
"1019",
"336",
"2084",
"4320",
"1294",
"1509",
"1774",
"4580",
"1182",
"5989",
"3814",
"1519",
"863",
"2563",
"1504",
"4353",
"4324",
"3774",
"1874",
"1085",
"4839",
"1712",
"2286",
"1649",
"1318",
"1965",
"2543",
"5563",
"5898",
"5713",
"1125",
"5469",
"5859",
"5695",
"6223",
"3763",
"5860",
"1255",
"5063",
"5876",
"5892",
"5746",
"5946",
"5458",
"5858",
"1188",
"5100",
"1899",
"888",
"1338",
"5969",
"4707",
"2089",
"2368",
"1767",
"1254",
"4164",
"5049",
"235",
"4453",
"4160",
"1524",
"1355",
"4375",
"1768",
"4862",
"4461",
"4775",
"1968",
"1518",
"1555",
"4966",
"1584",
"2052",
"4246",
"2222",
"4732",
"5694",
"5925",
"5693",
"5944",
"5770",
"5721",
"434",
"1770",
"5951",
"6904",
"5732",
"6741",
"5594",
"3289",
"5305",
"5307",
"5306",
"5902",
"c_7dda56e2",
"6903",
"6929",
"c_abfaf2ea",
"5292",
"3844",
"5899",
"5878",
"5938",
"1594",
"5810",
"1854",
"6565",
"3288",
"3885",
"1855",
"5293",
"7355",
"3368",
"3364",
"2843",
"3375",
"c_41ef4cb8",
"5731",
"3372",
"1181",
"5894",
"1084",
"3366",
"1256",
"464",
"876",
"1442",
"2078",
"c_1af36ac9",
"4230",
"961",
"5734",
"4733",
"4635",
"3815",
"3813",
"5914",
"5818",
"3596",
"83",
"4881",
"2870",
"5653",
"7339",
"6545",
"5690",
"7361",
"6546",
"4229",
"6001",
"6000",
"5997",
"5998",
"5999",
"6003",
"4112",
"2471",
"5882",
"5994",
"8442",
"6559",
"5162",
"5165",
"5163",
"5164",
"7342",
"5297",
"450925",
"6562",
"4098",
"5896",
"4100",
"2122",
"4877",
"5513",
"c_e9ccc5a7",
"5747",
"4102",
"4103",
"1389",
"2601",
"3249",
"7946",
"3332",
"7981",
"1369",
"3512",
"3751",
"2604",
"1399",
"3412",
"7382",
"c_b0c14f45",
"185",
"1462",
"6541",
"3323",
"934",
"760",
"4495",
"6534",
"8448",
"6625",
"4007",
"2197",
"761",
"762",
"759",
"3302",
"3305",
"4504",
"162",
"4278",
"6737",
"851",
"7359",
"4834",
"5488",
"5429",
"7395",
"476",
"477",
"4741",
"5712",
"4709",
"1703",
"3380",
"591",
"4606",
"547",
"2908",
"6543",
"4259",
"2554",
"974",
"1993",
"1992",
"2480",
"1949",
"3464",
"1865",
"5280",
"5281",
"975",
"2340",
"160",
"2333",
"2594",
"1528",
"758",
"c_3a9ccfe3",
"6557",
"6555",
"6553",
"6739",
"375",
"6743",
"5560",
"5512",
"1710",
"2265",
"3748",
"3330",
"2174",
"2944",
"3331",
"1527",
"3336",
"c_e2de1ffd",
"3335",
"443600",
"6554",
"443629",
"c_67699417",
"3705",
"4471",
"3340",
"8461",
"5099",
"8472",
"c_6e3cbbca",
"3339",
"3608",
"1373",
"3343",
"3341",
"4767",
"1404",
"8471",
"3118",
"3702",
"6121",
"5723",
"c_d6ede0c7",
"3613",
"5766",
"853",
"3338",
"5396",
"3351",
"3349",
"5636",
"6550",
"6548",
"5484",
"6113",
"5395",
"5060",
"6253",
"6105",
"2514",
"2156",
"2424",
"981",
"1964",
"1880",
"2428",
"1736",
"5288",
"3838",
"1779",
"4647",
"2056",
"3835",
"5988",
"4783",
"5304",
"4462",
"3836",
"2568",
"6018",
"879",
"4990",
"1446",
"316",
"3834",
"4577",
"5065",
"3832",
"1059",
"1515",
"1516",
"6821",
"1827",
"2425",
"6822",
"4856",
"5993",
"3690",
"3881",
"8435",
"2744",
"1042",
"5971",
"5046",
"1499",
"5055",
"5045",
"4676",
"989",
"5242",
"5241",
"2430",
"3818",
"1382",
"1381",
"1383",
"3811",
"5410",
"3863",
"5500",
"5927",
"5888",
"5532",
"5466",
"3610",
"2228",
"3883",
"5014",
"5244",
"5346",
"5467",
"988",
"3884",
"1650",
"5013",
"3478",
"169",
"3471",
"6824",
"2765",
"2751",
"5819",
"2780",
"2936",
"3600",
"3616",
"70",
"3625",
"2686",
"299",
"1493",
"4624",
"4833",
"850",
"2712",
"3621",
"3620",
"990",
"3893",
"3892",
"4680",
"3541",
"3303",
"4166",
"1341",
"3728",
"2623",
"c_a3b85311",
"c_e749c083",
"248",
"262",
"2098",
"5362",
"c_a18525ab",
"c_f007e95f",
"3235",
"5153",
"947",
"2946",
"4493",
"5157",
"2386",
"5240",
"4749",
"2890",
"3240",
"4226",
"1952",
"2394",
"1132",
"2805",
"2798",
"3148",
"3674",
"2678",
"4818",
"2675",
"4397",
"5011",
"2684",
"4391",
"2799",
"508",
"1698",
"1959",
"7384",
"7383",
"426",
"2960",
"c_d1f03d01",
"5804",
"5943",
"5584",
"6212",
"c_9a1bc51d",
"5917",
"433",
"1808",
"1812",
"4393",
"2240",
"c_2ffe4574",
"1974",
"1975",
"1972",
"903",
"902",
"901",
"900",
"3220",
"4392",
"2900",
"5647",
"562",
"7351",
"742",
"3263",
"4814",
"5090",
"4774",
"4813",
"4752",
"3247",
"4815",
"5089",
"4795",
"4784",
"4806",
"c_ee45cd99",
"c_dd52c1e9",
"c_dcc650ef",
"c_7f2d2214",
"c_14d71455",
"c_9ffe3bdc",
"1476",
"5023",
"4394",
"1970",
"300",
"1973",
"587",
"1918",
"1300",
"1858",
"882",
"880",
"2225",
"1351",
"5602",
"6151",
"4603",
"4437",
"5751",
"3183",
"5820",
"5369",
"1642",
"1644",
"3180",
"5368",
"3181",
"6155",
"2321",
"4500",
"5935",
"97",
"2322",
"812",
"2047",
"2048",
"5750",
"4477",
"8552",
"2049",
"4389",
"3190",
"2961",
"3191",
"3175",
"4440",
"3568",
"1006",
"3267",
"4310",
"287",
"3065",
"4347",
"2839",
"2401",
"5666",
"3169",
"1641",
"5376",
"6926",
"5033",
"2746",
"4666",
"4673",
"4670",
"4668",
"4665",
"4669",
"3210",
"4664",
"4672",
"4667",
"5015",
"1305",
"5058",
"7947",
"1578",
"5771",
"771",
"c_f2c7a2f7",
"5696",
"3604",
"3603",
"263",
"3658",
"72",
"407",
"3213",
"2434",
"3211",
"3592",
"3212",
"3214",
"6901",
"3199",
"5353",
"1090",
"442884",
"1477",
"24",
"1199",
"3217",
"4288",
"4289",
"1996",
"1226",
"1225",
"4036",
"2871",
"c_e5dbcb63",
"1425",
"3678",
"2750",
"6369",
"4131",
"7136",
"4122",
"1881",
"4403",
"811",
"3591",
"5367",
"1839",
"4240",
"3094",
"46",
"1136",
"6115",
"6145",
"6146",
"c_8150867f",
"4290",
"1054",
"1721",
"1711",
"2217",
"6210",
"c_b3d1a308",
"c_0e4239f5",
"c_441fb0c4",
"266",
"279",
"265",
"1083",
"264",
"277",
"268",
"267",
"275",
"276",
"4641",
"5202",
"409",
"1815",
"1057",
"4884",
"1673",
"c_fa8f51b0",
"63",
"62",
"367",
"4842",
"2355",
"392",
"2911",
"5270",
"76",
"5267",
"3306",
"3008",
"2837",
"1662",
"478",
"64",
"519",
"5332",
"4309",
"931",
"354",
"1980",
"1982",
"1981",
"894",
"928",
"918",
"6755",
"2016",
"2358",
"4200",
"3896",
"4199",
"4196",
"6757",
"4192",
"5658",
"1051",
"3122",
"960",
"1674",
"5218",
"1301",
"5389",
"4158",
"4203",
"437",
"380",
"998",
"4313",
"6750",
"999",
"3821",
"5724",
"4140",
"3444",
"4106",
"2097",
"3445",
"1441",
"2595",
"3419",
"2557",
"366",
"3426",
"3425",
"3427",
"5453",
"5660",
"5468",
"3434",
"5952",
"3433",
"6377",
"5801",
"5901",
"7412",
"5827",
"3644",
"5525",
"3637",
"2835",
"3367",
"7935",
"6898",
"3472",
"201",
"3490",
"5485",
"5394",
"5639",
"5375",
"2380",
"6919",
"8410",
"2949",
"3455",
"3454",
"6930",
"7213",
"8441",
"6376",
"1215",
"1213",
"1212",
"7214",
"1008",
"80",
"2793",
"4348",
"958",
"2294",
"6374",
"4843",
"3295",
"4627",
"231",
"1467",
"2006",
"5122",
"860",
"6124",
"2356",
"5047",
"8464",
"994",
"4576",
"6112",
"2988",
"1612",
"3308",
"2990",
"1233",
"4854",
"1888",
"1908",
"4747",
"4745",
"4746",
"1988",
"4395",
"4236",
"6048",
"755",
"870",
"6310",
"6312",
"6309",
"191",
"604",
"6313",
"2979",
"645",
"4255",
"1475",
"984",
"1789",
"1445",
"2426",
"2415",
"2997",
"2996",
"6324",
"6721",
"6327",
"5016",
"5950",
"2935",
"2934",
"2621",
"2937",
"2620",
"131",
"1577",
"1534",
"3345",
"6319",
"767",
"765",
"1719",
"2204",
"4484",
"5227",
"2987",
"2357",
"1146",
"4388",
"5012",
"4637",
"4650",
"6928",
"2438",
"2437",
"2436",
"c_623f2583",
"1204",
"2306",
"2304",
"4829",
"4788",
"133",
"5257",
"5254",
"5253",
"2309",
"5256",
"2872",
"915",
"2329",
"3001",
"544",
"2323",
"997",
"5229",
"2984",
"2727",
"3275",
"6315",
"2709",
"6316",
"808",
"595",
"228",
"4876",
"4446",
"4327",
"1179",
"4339",
"2360",
"2017",
"2237",
"4613",
"2667",
"4287",
"4281",
"6293",
"4232",
"3629",
"2708",
"2714",
"2717",
"831",
"641",
"3628",
"4346",
"2719",
"7129",
"c_c95f62c1",
"1640",
"1821",
"3663",
"167",
"5042",
"3095",
"c_20f6ad03",
"6304",
"6264",
"6271",
"6270",
"6267",
"6269",
"6272",
"6263",
"6303",
"c_4254375b",
"8454",
"304",
"c_18ac5876",
"6171",
"5768",
"1669",
"4721",
"996",
"4575",
"4819",
"3805",
"2421",
"3802",
"2694",
"2693",
"2692",
"3647",
"3648",
"5726",
"3646",
"2077",
"1252",
"2852",
"867",
"c_18802d13",
"3848",
"6300",
"c_ab84e157",
"c_79e507c2",
"c_50fb9d42",
"c_d6e5f3ab",
"5461",
"3639",
"4419",
"4421",
"3638",
"6297",
"2206",
"c_a02c420c",
"c_28c51c2a",
"5887",
"730",
"5889",
"4047",
"3266",
"4386",
"3269",
"5215",
"5214",
"2865",
"4378",
"4382",
"3103",
"5213",
"1099",
"4398",
"531",
"2301",
"2663",
"2326",
"6855",
"c_52792cef",
"8463",
"4438",
"1238",
"3905",
"5699",
"2132",
"1142",
"6852",
"2349",
"2343",
"2352",
"1655",
"4542",
"4622",
"5865",
"4512",
"5392",
"158",
"5401",
"3681",
"3089",
"6849",
"4538",
"254",
"848",
"4655",
"4656",
"4652",
"5331",
"4654",
"3090",
"2433",
"3430",
"2234",
"193",
"4657",
"3127",
"4653",
"3438",
"2233",
"1283",
"5209",
"4520",
"1195",
"7945",
"2043",
"4830",
"4832",
"985",
"2282",
"2283",
"5103",
"5101",
"3700",
"2316",
"2044",
"4736",
"4735",
"3321",
"2728",
"3550",
"163",
"4551",
"4553",
"1243",
"593",
"2953",
"1535",
"4435",
"1257",
"1259",
"3548",
"1761",
"825",
"1334",
"1853",
"1280",
"5612",
"2857",
"1273",
"2375",
"8416",
"7188",
"5588",
"5527",
"1689",
"3417",
"1186",
"3573",
"3575",
"2279",
"5604",
"6026",
"3569",
"4233",
"1424",
"157",
"3363",
"6051",
"3571",
"7810",
"6718",
"3552",
"5941",
"3527",
"4548",
"4544",
"3516",
"3520",
"6347",
"1638",
"6346",
"532",
"6099",
"1634",
"3524",
"1648",
"1647",
"7949",
"6236",
"1343",
"6355",
"6357",
"2807",
"6356",
"5436",
"6150",
"1349",
"625",
"3845",
"4820",
"5869",
"3847",
"5085",
"5084",
"1783",
"2209",
"2208",
"2276",
"6351",
"2553",
"5986",
"2552",
"6353",
"6887",
"6891",
"5830",
"2363",
"1481",
"2876",
"1995",
"5558",
"5117",
"4694",
"2598",
"22",
"14",
"21",
"15",
"7190",
"1976",
"4871",
"3036",
"5784",
"4271",
"1047",
"734",
"5116",
"1860",
"2939",
"5936",
"5070",
"5918",
"6896",
"126",
"5959",
"1939",
"171",
"1593",
"3507",
"2506",
"2508",
"c_cef8ea9d",
"2587",
"5995",
"2800",
"2811",
"127",
"4753",
"4756",
"450",
"3059",
"2878",
"4755",
"2160",
"489",
"338",
"356",
"1242",
"2880",
"6006",
"159",
"4321",
"4757",
"4751",
"3045",
"3037",
"4758",
"355",
"5529",
"418",
"5276",
"694",
"1679",
"3041",
"1359",
"1082",
"2796",
"2808",
"1013",
"705",
"4764",
"2223",
"1678",
"3975",
"3976",
"5624",
"1579",
"2586",
"2584",
"2585",
"8423",
"8421",
"8422",
"8424",
"1448",
"5654",
"8415",
"1211",
"253",
"2630",
"769",
"3053",
"2991",
"4452",
"4467",
"2440",
"3055",
"526",
"3389",
"885",
"7199",
"5855",
"214",
"5609",
"5748",
"2332",
"2269",
"3260",
"471",
"379",
"2388",
"5960",
"5074",
"7375",
"8427",
"6731",
"6735",
"6732",
"6734",
"6730",
"6733",
"5599",
"3924",
"2158",
"6106",
"571",
"3099",
"1697",
"3699",
"151",
"8425",
"6610",
"5497",
"5496",
"3979",
"3980",
"2942",
"5837",
"3978",
"2941",
"5145",
"5412",
"5417",
"5416",
"5838",
"2940",
"5832",
"2964",
"3965",
"5686",
"5144",
"3973",
"4882",
"3970",
"8455",
"8443",
"3972",
"3971",
"1457",
"5633",
"4434",
"8453",
"3839",
"6900",
"501",
"3934",
"5079",
"3927",
"497",
"499",
"3635",
"3926",
"1339",
"2337",
"884",
"5910",
"3940",
"2338",
"3939",
"5161",
"5493",
"437483",
"c_471a61cf",
"1777",
"1129",
"1281",
"3475",
"5582",
"5559",
"3920",
"306",
"6077",
"2832",
"1498",
"8426",
"6277",
"548",
"717",
"1622",
"2881",
"2051",
"2086",
"100",
"6627",
"553",
"1941",
"1942",
"1473",
"c_558c1e00",
"c_b19d7503",
"859",
"5708",
"5444",
"5143",
"1623",
"3992",
"4791",
"3996",
"611",
"3995",
"5405",
"1114",
"3677",
"951",
"3998",
"3999",
"4001",
"505",
"2096",
"2036",
"4005",
"4004",
"3526",
"4003",
"2271",
"3951",
"8434",
"c_b499ede2",
"c_7546272f",
"3950",
"5933",
"3945",
"5428",
"5572",
"1251",
"6613",
"8433",
"2",
"1092",
"4742",
"2383",
"1600",
"6727",
"4349",
"3960",
"4114",
"2597",
"6899",
"6925",
"6728",
"953",
"950",
"1272",
"3989",
"2085",
"1546",
"1547",
"1545",
"1544",
"3983",
"1540",
"6624",
"5139",
"44",
"6622",
"94",
"37",
"3987",
"1548",
"3986",
"3985",
"c_d02647e2",
"437539",
"443",
"7939",
"92",
"437655",
"6176",
"6905",
"716",
"777",
"897",
"88",
"6276",
"6126",
"5197",
"5520",
"5071",
"3023",
"1429",
"1431",
"1427",
"1428",
"5473",
"6778",
"c_4cd4362b",
"4426",
"5198",
"5358",
"435299",
"5199",
"6779",
"5920",
"5919",
"5476",
"2157",
"3701",
"3033",
"5915",
"c_f53bfc88",
"5863",
"5929",
"5893",
"c_433922a6",
"630",
"4151",
"1582",
"1581",
"6189",
"444",
"6004",
"5272",
"1350",
"78",
"617",
"4135",
"4134",
"2860",
"2896",
"c_9a9e5627",
"c_65366e84",
"2894",
"7130",
"6768",
"5568",
"4864",
"3342",
"4138",
"5274",
"3032",
"493",
"1533",
"8414",
"4869",
"6067",
"4868",
"6764",
"4867",
"4866",
"4870",
"1332",
"5187",
"5186",
"5569",
"6104",
"6110",
"1745",
"5183",
"4384",
"4133",
"6771",
"c_324b44f1",
"6775",
"6177",
"6772",
"7950",
"c_a17a2156",
"6182",
"6178",
"6773",
"3022",
"4431",
"5195",
"3010",
"6139",
"4425",
"4429",
"5190",
"30",
"3860",
"6774",
"3018",
"4502",
"4433",
"5180",
"5194",
"5192",
"5193",
"5348",
"6770",
"1984",
"2883",
"2599",
"5320",
"3014",
"3012",
"4432",
"3015",
"3868",
"3871",
"5477",
"3867",
"4331",
"3872",
"6130",
"908",
"5478",
"5347",
"1024",
"428",
"3729",
"434664",
"5278",
"8385",
"8387",
"23",
"8386",
"7137",
"c_b605a1d4",
"17",
"6058",
"4875",
"1286",
"4610",
"3708",
"5526",
"703",
"4149",
"8377",
"8378",
"8375",
"7192",
"3727",
"7206",
"1461",
"7205",
"450953",
"1793",
"6095",
"6093",
"6094",
"257",
"16",
"7346",
"8395",
"5644",
"118",
"6101",
"3760",
"4218",
"1643",
"3761",
"3758",
"1934",
"1933",
"1656",
"6027",
"952",
"1060",
"1831",
"2945",
"1241",
"572",
"3739",
"8412",
"6701",
"2312",
"142",
"378",
"4629",
"466",
"2127",
"6700",
"2654",
"2715",
"2382",
"6045",
"1139",
"3768",
"5262",
"3773",
"3769",
"1267",
"1266",
"3775",
"6921",
"c_ece9dd5b",
"3770",
"3779",
"c_097751fd",
"2886",
"743",
"752",
"2256",
"2374",
"2482",
"6031",
"921",
"2882",
"2884",
"3788",
"1123",
"776",
"868",
"3789",
"1023",
"3631",
"6703",
"4341",
"3792",
"c_3508a934",
"2429",
"4617",
"5578",
"5575",
"5580",
"5787",
"5904",
"5866",
"4126",
"5561",
"5856",
"1052",
"437422",
"1851",
"2609",
"441716",
"738",
"4080",
"251",
"5539",
"5868",
"2591",
"5538",
"5459",
"5408",
"5548",
"5589",
"5399",
"5829",
"5518",
"5611",
"6288",
"447503",
"c_5cec7cfa",
"2346",
"8407",
"2347",
"2348",
"8542",
"5506",
"6050",
"6060",
"6057",
"6059",
"5509",
"6056",
"5507",
"c_e9845504",
"5807",
"4124",
"4044",
"5128",
"4174",
"19",
"2248",
"443167",
"439592",
"5308",
"8462",
"4043",
"5328",
"439498",
"447442",
"447536",
"237",
"5806",
"447604",
"4178",
"6708",
"6706",
"6710",
"5494",
"5552",
"8480",
"5550",
"5761",
"5942",
"6709",
"c_2e08d60f",
"8478",
"c_1c478aa5",
"4048",
"5404",
"5749",
"5403",
"116",
"5681",
"1450",
"c_6547ccb7",
"5491",
"6286",
"5864",
"5543",
"c_fd848a9f",
"5772",
"5413",
"2451",
"8409",
"4061",
"5137",
"5826",
"2008",
"c_6f7bbe63",
"c_8d31aed6",
"5675",
"2931",
"4037",
"350",
"3106",
"4024",
"4019",
"388",
"8479",
"353",
"352",
"4026",
"2930",
"6133",
"8468",
"5204",
"5812",
"6265",
"2227",
"2618",
"6266",
"5811",
"4023",
"440710",
"440657",
"440631",
"3494",
"6290",
"6289",
"445228",
"6285",
"445203",
"4027",
"4473",
"5129",
"445259",
"8489",
"440605",
"440579",
"440265",
"5815",
"2809",
"440238",
"5115",
"5511",
"5822",
"5456",
"5203",
"3087",
"5524",
"7128",
"5136",
"5953",
"5739",
"5581",
"1989",
"442850",
"5661",
"7362",
"5663",
"5803",
"443080",
"5662",
"5664",
"7391",
"7392",
"7393",
"6395",
"7394",
"6394",
"7941",
"2019",
"4888",
"1580",
"435265",
"443048",
"7389",
"7812",
"4176",
"1276",
"4770",
"6391",
"1936",
"3294",
"1147",
"1152",
"7360",
"1150",
"6389",
"6387",
"5269",
"6388",
"7218",
"1149",
"6386",
"6381",
"6392",
"6384",
"6716",
"5608",
"7955",
"c_ff4dd793",
"5338",
"5740",
"5028",
"5651",
"1246",
"5303",
"3282",
"4810",
"7410",
"2373",
"4889",
"707",
"5300",
"5298",
"5299",
"4883",
"5990",
"601",
"1134",
"4713",
"1671",
"2104",
"1552",
"1665",
"606",
"1553",
"1265",
"3806",
"1262",
"4861",
"1270",
"1572",
"1569",
"1575",
"1576",
"1570",
"1566",
"1574",
"1109",
"1103",
"1108",
"1112",
"1104",
"1111",
"1110",
"1107",
"1106",
"4773",
"5364",
"5000",
"5010",
"4682",
"4691",
"4687",
"4688",
"4685",
"4684",
"4689",
"4692",
"4683",
"2954",
"2955",
"920",
"4170",
"1863",
"970",
"1206",
"4088",
"2274",
"2221",
"1029",
"1772",
"5698",
"5385",
"4085",
"417",
"109",
"4340",
"1715",
"2273",
"6041",
"6040",
"8312",
"8315",
"8316",
"8321",
"8319",
"8334",
"8337",
"8336",
"8335",
"8333",
"8327",
"8313",
"8326",
"8325",
"8314",
"8310",
"8330",
"8329",
"8331",
"8306",
"8307",
"8309",
"8308",
"8305",
"8318",
"8322",
"8323",
"8320",
"1235",
"4824",
"1583",
"1904",
"2075",
"2377",
"1666",
"2439",
"2328",
"1089",
"5032",
"3714",
"4364",
"5381",
"1702",
"1430",
"8500",
"8502",
"8504",
"8501",
"8499",
"8503",
"8519",
"8518",
"8517",
"8520",
"8533",
"8530",
"8532",
"8529",
"8528",
"8531",
"8537",
"8536",
"8535",
"8538",
"8534",
"8513",
"8515",
"8514",
"8511",
"8512",
"8516",
"8506",
"8508",
"8505",
"8509",
"8507",
"8510",
"8526",
"8522",
"8523",
"8527",
"8524",
"8521",
"8525",
"8008",
"8007",
"8001",
"8000",
"8006",
"8004",
"8005",
"8002",
"8003",
"7999",
"7998",
"7994",
"7995",
"7996",
"7993",
"7990",
"7992",
"7991",
"7989",
"7988",
"5246",
"5247",
"c_82778b62",
"c_dffba30e",
"c_f9143718",
"c_049691c5",
"c_d3db05e6",
"c_cf9e250b",
"c_e4bfb3bb",
"c_a5c15adc",
"c_ec0f4970",
"c_070e15e7",
"c_23635024",
"c_496cee2d",
"c_095c8a84",
"c_412ed48a",
"c_7d7c383b",
"c_fee7acae",
"c_e300778f",
"c_b4cffbee",
"c_7a080afd",
"c_14e0ae65",
"c_eb3ada47",
"7895",
"7896",
"7893",
"7892",
"7891",
"5516",
"5515",
"5514",
"1713",
"2028",
"1227",
"1651",
"5038",
"5036",
"1027",
"2061",
"1661",
"1028",
"4410",
"5349",
"1124",
"5095",
"1876",
"4779",
"3888",
"1218",
"111",
"4351",
"2027",
"321",
"1611",
"5029",
"33",
"2018",
"4148",
"4782",
"1561",
"1841",
"1766",
"5556",
"1729",
"1843",
"4780",
"4217",
"5108",
"4092",
"2057",
"2058",
"1842",
"1128",
"3830",
"1217",
"1769",
"1216",
"1236",
"4465",
"4846",
"1166",
"4161",
"1617",
"249",
"1639",
"1734",
"5709",
"5706",
"5619",
"451941",
"451991",
"5652",
"5445",
"5454",
"314",
"1228",
"4567",
"2562",
"3415",
"1522",
"4816",
"3406",
"3405",
"2032",
"2820",
"4570",
"1185",
"496",
"3833",
"2024",
"3401",
"3403",
"2042",
"2055",
"1208",
"691",
"247",
"4841",
"2224",
"1253",
"4621",
"1220",
"1771",
"5020",
"6036",
"4342",
"2525",
"5965",
"2331",
"841",
"1219",
"4662",
"2231",
"5040",
"1292",
"1559",
"6278",
"1296",
"2037",
"3410",
"6280",
"205",
"6281",
"3858",
"4466",
"3416",
"1237",
"4772",
"4583",
"3411",
"4648",
"3857",
"2088",
"2229",
"993",
"5930",
"1613",
"1532",
"4354",
"2336",
"4572",
"1501",
"1560",
"1500",
"2866",
"2138",
"726",
"4620",
"4411",
"1308",
"623",
"1955",
"2101",
"112",
"852",
"5562",
"4249",
"1131",
"6061",
"5673",
"6705",
"6125",
"2627",
"5585",
"c_1137acd8",
"2390",
"2483",
"5886",
"6108",
"2281",
"3721",
"3310",
"959",
"2280",
"2241",
"4369",
"399",
"2761",
"2768",
"c_61b6fae1",
"5149",
"c_f05a3667",
"5472",
"5847",
"c_2a1a9fdc",
"5823",
"5849",
"3034",
"3051",
"1900",
"433963",
"3988",
"3155",
"3158",
"5890",
"5565",
"1440",
"856",
"5683",
"686",
"2063",
"2523",
"2522",
"3469",
"c_667f2c16",
"3135",
"4307",
"2759",
"6747",
"2535",
"916",
"2533",
"5605",
"5676",
"c_a2e1def8",
"5857",
"5760",
"5722",
"1879",
"5736",
"5737",
"5735",
"2503",
"1983",
"2110",
"2111",
"2108",
"2112",
"2109",
"4808",
"4796",
"4792",
"4803",
"4797",
"4807",
"2215",
"5463",
"c_d701b19c",
"c_3212cc75",
"6405",
"c_9055e9e1",
"c_0e092eaf",
"5464",
"597",
"5742",
"4891",
"5593",
"2060",
"434880",
"435007",
"5912",
"3132",
"5672",
"5754",
"6401",
"272",
"5533",
"5839",
"5782",
"4075",
"4076",
"6402",
"3138",
"941",
"919",
"1741",
"141",
"4853",
"4485",
"4628",
"2235",
"3457",
"2830",
"3128",
"3784",
"2203",
"4376",
"5072",
"451486",
"1557",
"1449",
"3780",
"5955",
"1526",
"4460",
"5026",
"1502",
"4326",
"1963",
"4325",
"4679",
"5172",
"3284",
"2855",
"3287",
"3286",
"3356",
"3285",
"6570",
"5900",
"c_1000336b",
"3369",
"5916",
"c_7951978e",
"5752",
"4636",
"3370",
"3730",
"1596",
"3371",
"192",
"7135",
"1480",
"5733",
"1997",
"6568",
"c_6f563894",
"c_aaa8331c",
"5911",
"5934",
"4306",
"331",
"3362",
"4494",
"1915",
"4525",
"4513",
"7959",
"4787",
"4809",
"5230",
"4804",
"1897",
"4712",
"2361",
"4710",
"4711",
"1861",
"c_c825b20c",
"6556",
"5168",
"6547",
"c_34242feb",
"5909",
"5167",
"4769",
"5956",
"4768",
"3609",
"1530",
"6038",
"1837",
"4642",
"1787",
"c_18180cde",
"294",
"5465",
"3236",
"6902",
"2220",
"5963",
"4463",
"4731",
"1971",
"230",
"1816",
"6359",
"8429",
"6334",
"6360",
"c_062f6587",
"4284",
"5025",
"4283",
"3618",
"6089",
"1732",
"1733",
"229",
"3215",
"3643",
"5573",
"3642",
"6711",
"1631",
"5393",
"3283",
"2724",
"6756",
"4190",
"4115",
"6378",
"3421",
"2603",
"91",
"5523",
"7417",
"5535",
"5756",
"3578",
"5432",
"5670",
"5326",
"434056",
"3508",
"4794",
"3509",
"3510",
"3456",
"c_4e75f061",
"2898",
"6325",
"2680",
"4476",
"1722",
"4638",
"4639",
"6913",
"3665",
"3662",
"3661",
"3666",
"5043",
"550",
"6307",
"435098",
"6295",
"6296",
"5235",
"5234",
"5233",
"6298",
"5481",
"435061",
"6912",
"8452",
"c_9a27a573",
"2671",
"2641",
"c_681e6d24",
"5111",
"5391",
"5388",
"467",
"468",
"469",
"7358",
"4737",
"4740",
"4739",
"4345",
"2385",
"6854",
"4197",
"4261",
"4658",
"c_b3e9d826",
"106",
"3354",
"107",
"3547",
"5339",
"3907",
"4439",
"5522",
"5521",
"6084",
"7943",
"7811",
"5743",
"2875",
"2874",
"5377",
"5378",
"425",
"5586",
"5600",
"3221",
"3063",
"383",
"2895",
"1093",
"2091",
"4701",
"5528",
"c_9d09fbc3",
"7381",
"7380",
"7223",
"5540",
"886",
"5853",
"95",
"404",
"348",
"448",
"2669",
"364",
"7366",
"2236",
"3921",
"1699",
"5711",
"5685",
"5802",
"5850",
"1747",
"5110",
"1940",
"5112",
"5113",
"6005",
"5492",
"3942",
"422",
"605",
"1171",
"603",
"598",
"6626",
"5330",
"5649",
"2867",
"5796",
"2272",
"1095",
"5570",
"4727",
"5659",
"6132",
"6616",
"5884",
"898",
"5924",
"4487",
"5828",
"c_715c08c0",
"3740",
"c_dea47e63",
"5635",
"3704",
"5601",
"6192",
"5671",
"5705",
"5174",
"6190",
"6191",
"5185",
"435228",
"8413",
"443345",
"6907",
"4698",
"4699",
"6776",
"6777",
"5745",
"6906",
"3875",
"5182",
"3011",
"c_25fe24f4",
"c_32e52346",
"3017",
"5179",
"395",
"2600",
"5808",
"3019",
"4428",
"3021",
"5189",
"6337",
"3755",
"890",
"1893",
"3738",
"c_6c8b04b9",
"4469",
"6699",
"3776",
"3772",
"3771",
"c_315af034",
"5419",
"5872",
"5926",
"c_4e3beee2",
"452085",
"5643",
"5138",
"450652",
"450658",
"c_e548b67f",
"c_b54dc55a",
"c_d08207d1",
"978",
"3238",
"4237",
"73",
"c_4d5b6dbd",
"2610",
"2608",
"2054",
"2607",
"6287",
"5135",
"5134",
"5133",
"6358",
"6053",
"7132",
"7986",
"5630",
"6723",
"6724",
"8482",
"8481",
"8477",
"8476",
"7982",
"7953",
"8475",
"7954",
"5618",
"4049",
"5406",
"8285",
"5505",
"5531",
"5504",
"5510",
"4052",
"4051",
"4041",
"5132",
"5834",
"c_c4c48e5c",
"5674",
"5315",
"5824",
"c_2212c2c4",
"5567",
"6218",
"7138",
"5312",
"1531",
"5131",
"445178",
"5816",
"5814",
"5790",
"5648",
"3866",
"c_70c441cc",
"c_104476d7",
"5704",
"5702",
"c_b8e8e06e",
"5700",
"5703",
"5701",
"8438",
"6385",
"442791",
"6744",
"6260",
"c_2c5a05a0",
"442765",
"6111",
"4720",
"5380",
"4717",
"4715",
"4718",
"4719",
"4724",
"4714",
"4716",
"4725",
"4723",
"4722",
"2952",
"2951",
"1263",
"1264",
"2956",
"4686",
"4212",
"484",
"5617",
"891",
"4578",
"1819",
"1196",
"3816",
"1822",
"1207",
"8547",
"3402",
"1986",
"1299",
"451885",
"333",
"451912",
"454784",
"7198",
"451462",
"1289",
"4523",
"4154",
"2899",
"4592",
"1121",
"503",
"7134",
"6784",
"3686",
"2769",
"5150",
"2572",
"1140",
"5798",
"5650",
"2529",
"5439",
"5727",
"8446",
"8445",
"5895",
"5874",
"450869",
"1775",
"8444",
"5222",
"5224",
"5223",
"5221",
"5220",
"1875",
"1776",
"1871",
"1877",
"1872",
"c_1c2d26cf",
"5791",
"c_3897ba70",
"6404",
"608",
"1169",
"610",
"599",
"609",
"607",
"c_25e0bb6d",
"600",
"3147",
"602",
"1683",
"1050",
"2080",
"840",
"3137",
"4335",
"940",
"c_82916123",
"8451",
"1505",
"2290",
"5438",
"663",
"7396",
"6566",
"c_8c634c9b",
"6740",
"4799",
"2545",
"c_8f56da2a",
"6370",
"434004",
"6273",
"2617",
"5597",
"c_ccac12ef",
"c_12e208c8",
"3777",
"5668",
"1056",
"4056",
"980",
"979",
"197",
"7813",
"196",
"200",
"8430",
"3812",
"3787",
"5022",
"6406",
"8303",
"6403",
"1049",
"2423",
"c_d978c1fc",
"1906",
"596",
"3396",
"8498",
"3399",
"4777",
"1905",
"3398",
"3397",
"4247",
"199",
"4053",
"198",
"4355",
"5318",
],
}
class MultiEURLEXConfig(datasets.BuilderConfig):
"""BuilderConfig for MultiEURLEX."""
def __init__(self, language: str, languages=None, label_level="level_1", **kwargs):
"""BuilderConfig for MultiEURLEX.
Args:
            language: One of the language codes listed in _LANGUAGES, or "all_languages".
            languages: Optional subset of language codes to load when ``language`` is
                "all_languages"; defaults to all of _LANGUAGES.
            label_level: EUROVOC label granularity; one of the keys of _CONCEPTS
                (defaults to "level_1").
            **kwargs: keyword arguments forwarded to super.
"""
super(MultiEURLEXConfig, self).__init__(**kwargs)
self.language = language
self.label_level = label_level
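        # A single-language config loads only that language; "all_languages" loads the
        # given subset of codes, or every code in _LANGUAGES when none is provided.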
if language != "all_languages":
self.languages = [language]
else:
self.languages = languages if languages is not None else _LANGUAGES
class MultiEURLEX(datasets.GeneratorBasedBuilder):
"""MultiEURLEX - A multi-lingual and multi-label legal document classification dataset. Version 1.0"""
VERSION = datasets.Version("1.0.0", "")
BUILDER_CONFIG_CLASS = MultiEURLEXConfig
BUILDER_CONFIGS = [
MultiEURLEXConfig(
name=lang,
language=lang,
version=datasets.Version("1.0.0", ""),
description=f"Plain text import of MultiEURLEX for the {lang} language",
)
for lang in _LANGUAGES
] + [
MultiEURLEXConfig(
name="all_languages",
language="all_languages",
version=datasets.Version("1.0.0", ""),
description="Plain text import of MultiEURLEX for all languages",
)
]
def _info(self):
if self.config.language == "all_languages":
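            # For the combined config, "text" is a Translation feature holding one string per language in _LANGUAGES.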
features = datasets.Features(
{
"celex_id": datasets.Value("string"),
"text": datasets.Translation(
languages=_LANGUAGES,
),
"labels": datasets.features.Sequence(
datasets.ClassLabel(names=_CONCEPTS[self.config.label_level])
),
}
)
else:
features = datasets.Features(
{
"celex_id": datasets.Value("string"),
"text": datasets.Value("string"),
"labels": datasets.features.Sequence(
datasets.ClassLabel(names=_CONCEPTS[self.config.label_level])
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage="https://github.io/iliaschalkidis",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(DATA_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl"), "split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl"), "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl"), "split": "dev"},
),
]
def _generate_examples(self, filepath, split):
"""This function returns the examples in the raw (text) form."""
if self.config.language == "all_languages":
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
data = json.loads(row)
yield id_, {
"celex_id": data["celex_id"],
"text": {lang: data["text"][lang] for lang in self.config.languages},
"labels": data["eurovoc_concepts"][self.config.label_level],
}
else:
with open(filepath, encoding="utf-8") as f:
for id_, row in enumerate(f):
data = json.loads(row)
if data["text"][self.config.language] is not None:
yield id_, {
"celex_id": data["celex_id"],
"text": data["text"][self.config.language],
"labels": data["eurovoc_concepts"][self.config.label_level],
}
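# --- Hedged usage sketch (added for illustration; not part of the original script). ---
# It shows how this builder might be loaded through the `datasets` library. The local
# filename "multi_eurlex.py", the config name "en", and the label_level value are
# assumptions for the example; any code in _LANGUAGES (or "all_languages") and any key
# of _CONCEPTS should work, per the config class above. The __main__ guard keeps the
# example from running when the datasets library imports this script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the assumed English config of this script and keep the coarse label level.
    dataset = load_dataset("multi_eurlex.py", "en", label_level="level_1")
    sample = dataset["train"][0]
    print(sample["celex_id"], sample["labels"][:5])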
| 16.609465 | 106 | 0.273736 |
4a2770aa34f9b4617811e836a5182a9d833a1fe3 | 68 | py | Python | userplugins/AlphaMusic.py | AlphaEliasPY/VC-Bot- | 088f8efcb70dab24f32349dcc52557786905382a | ["MIT"] | 2 | 2021-08-28T10:36:27.000Z | 2022-02-03T16:48:38.000Z | userplugins/AlphaMusic.py | AlphaEliasPY/VC-Bot- | 088f8efcb70dab24f32349dcc52557786905382a | ["MIT"] | null | null | null | userplugins/AlphaMusic.py | AlphaEliasPY/VC-Bot- | 088f8efcb70dab24f32349dcc52557786905382a | ["MIT"] | 9 | 2021-08-25T07:27:08.000Z | 2022-02-07T15:06:31.000Z |
from utils import *
from config import *
from userplugins import *
| 13.6 | 25 | 0.764706 |
4a27713d43da514894f4cdf99c5a88ad364f75b9 | 5,400 | py | Python | src/poetry/console/commands/install.py | ycechungAI/poetry | feb11b133af2e94d6c5239decb4e614426f2a299 | ["MIT"] | null | null | null | src/poetry/console/commands/install.py | ycechungAI/poetry | feb11b133af2e94d6c5239decb4e614426f2a299 | ["MIT"] | null | null | null | src/poetry/console/commands/install.py | ycechungAI/poetry | feb11b133af2e94d6c5239decb4e614426f2a299 | ["MIT"] | null | null | null |
from __future__ import annotations
from cleo.helpers import option
from poetry.console.commands.installer_command import InstallerCommand
class InstallCommand(InstallerCommand):
name = "install"
description = "Installs the project dependencies."
options = [
*InstallerCommand._group_dependency_options(),
option(
"no-dev",
None,
"Do not install the development dependencies."
" (<warning>Deprecated</warning>)",
),
option(
"dev-only",
None,
"Only install the development dependencies."
" (<warning>Deprecated</warning>)",
),
option(
"sync",
None,
"Synchronize the environment with the locked packages and the specified"
" groups.",
),
option(
"no-root", None, "Do not install the root package (the current project)."
),
option(
"no-binary",
None,
"Do not use binary distributions for packages matching given policy.\n"
"Use package name to disallow a specific package; or <b>:all:</b> to\n"
"disallow and <b>:none:</b> to force binary for all packages. Multiple\n"
"packages can be specified separated by commas.",
flag=False,
multiple=True,
),
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
option(
"remove-untracked",
None,
"Removes packages not present in the lock file."
" (<warning>Deprecated</warning>)",
),
option(
"extras",
"E",
"Extra sets of dependencies to install.",
flag=False,
multiple=True,
),
]
help = """The <info>install</info> command reads the <comment>poetry.lock</> file from
the current directory, processes it, and downloads and installs all the
libraries and dependencies outlined in that file. If the file does not
exist it will look for <comment>pyproject.toml</> and do the same.
<info>poetry install</info>
By default, the above command will also install the current project. To install only the
dependencies and not the current project, run the command with the
<info>--no-root</info> option like below:
<info> poetry install --no-root</info>
"""
_loggers = ["poetry.repositories.pypi_repository", "poetry.inspection.info"]
def handle(self) -> int:
from poetry.core.masonry.utils.module import ModuleOrPackageNotFound
from poetry.masonry.builders.editable import EditableBuilder
self._installer.use_executor(
self.poetry.config.get("experimental.new-installer", False)
)
extras = []
for extra in self.option("extras", []):
if " " in extra:
extras += [e.strip() for e in extra.split(" ")]
else:
extras.append(extra)
self._installer.extras(extras)
with_synchronization = self.option("sync")
if self.option("remove-untracked"):
self.line_error(
"<warning>The `<fg=yellow;options=bold>--remove-untracked</>` option is"
" deprecated, use the `<fg=yellow;options=bold>--sync</>` option"
" instead.</warning>"
)
with_synchronization = True
if self.option("no-binary"):
policy = ",".join(self.option("no-binary", []))
try:
self._installer.no_binary(policy=policy)
except ValueError as e:
self.line_error(
f"<warning>Invalid value (<c1>{policy}</>) for"
f" `<b>--no-binary</b>`</>.\n\n<error>{e}</>"
)
return 1
self._installer.only_groups(self.activated_groups)
self._installer.dry_run(self.option("dry-run"))
self._installer.requires_synchronization(with_synchronization)
self._installer.verbose(self._io.is_verbose())
return_code = self._installer.run()
if return_code != 0:
return return_code
if self.option("no-root"):
return 0
try:
builder = EditableBuilder(self.poetry, self._env, self._io)
except ModuleOrPackageNotFound:
# This is likely due to the fact that the project is an application
# not following the structure expected by Poetry
# If this is a true error it will be picked up later by build anyway.
return 0
log_install = (
"<b>Installing</> the current project:"
f" <c1>{self.poetry.package.pretty_name}</c1>"
f" (<{{tag}}>{self.poetry.package.pretty_version}</>)"
)
overwrite = self._io.output.is_decorated() and not self.io.is_debug()
self.line("")
self.write(log_install.format(tag="c2"))
if not overwrite:
self.line("")
if self.option("dry-run"):
self.line("")
return 0
builder.build()
if overwrite:
self.overwrite(log_install.format(tag="success"))
self.line("")
return 0
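# --- Added usage illustration; not part of the original command class. ---
# Typical invocations that exercise the options declared above (the extras
# name "docs" is an assumed example):
#   poetry install                      # dependencies plus the current project
#   poetry install --no-root --sync     # dependencies only, synchronized with the lock file
#   poetry install -E docs --dry-run    # preview an install with the "docs" extra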
428 | py | Python | myapp/migrations/0012_form_action.py | sharmin6630/Project-Distribution | ["MIT"] | stars: null | issues: 1 | forks: null
# Generated by Django 3.0.5 on 2021-06-26 00:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0011_form_assigned_supervisor_id'),
]
operations = [
migrations.AddField(
model_name='form',
name='action',
field=models.CharField(blank=True, default='Save', max_length=55, null=True),
),
]
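# --- Added illustration; not part of the generated migration. ---
# Once applied, the Form model gains a field equivalent to:
#   action = models.CharField(blank=True, default='Save', max_length=55, null=True)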
1,166 | py | Python | pype/hosts/maya/plugins/publish/validate_mesh_non_manifold.py | simonebarbieri/pype | ["MIT"] | stars: null | issues: null | forks: null
from maya import cmds
import pyblish.api
import pype.api
import pype.hosts.maya.api.action
class ValidateMeshNonManifold(pyblish.api.Validator):
"""Ensure that meshes don't have non-manifold edges or vertices
To debug the problem on the meshes you can use Maya's modeling
tool: "Mesh > Cleanup..."
"""
order = pype.api.ValidateMeshOrder
hosts = ['maya']
families = ['model']
label = 'Mesh Non-Manifold Vertices/Edges'
actions = [pype.hosts.maya.api.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance):
meshes = cmds.ls(instance, type='mesh', long=True)
invalid = []
for mesh in meshes:
if (cmds.polyInfo(mesh, nonManifoldVertices=True) or
cmds.polyInfo(mesh, nonManifoldEdges=True)):
invalid.append(mesh)
return invalid
def process(self, instance):
"""Process all the nodes in the instance 'objectSet'"""
invalid = self.get_invalid(instance)
if invalid:
raise ValueError("Meshes found with non-manifold "
"edges/vertices: {0}".format(invalid))
1,871 | py | Python | python/MultiChannelBuildTool.py | SteamedBunZL/AndroidUtilByZFT | ["Apache-2.0"] | stars: null | issues: null | forks: null
# coding=utf-8
import zipfile
import shutil
import os
import time
# Get the current system time
def gainDateTime():
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
# Empty file that will be written into the apk package as the channel file
src_empty_file = 'info/chennel_empty.txt'
# Create the empty file (created if it does not exist)
f = open(src_empty_file, 'w')
f.close()
# Collect all source apk packages in the current directory
src_apks = []
# python3: os.listdir() is enough; os.listdir('.') is used here for Python 2 compatibility
for file in os.listdir('.'):
    if os.path.isfile(file):
        extension = os.path.splitext(file)[1][1:]
        if extension == 'apk':
            src_apks.append(file)
# Read the channel list
channel_file = 'info/channel_list.txt'
f = open(channel_file)
lines = f.readlines()
f.close()
for src_apk in src_apks:
    # file name (with extension)
    src_apk_file_name = os.path.basename(src_apk)
    # split the file name and the extension
    temp_list = os.path.splitext(src_apk_file_name)
    # name without extension
    src_apk_name = temp_list[0]
    # extension, including the dot, e.g. ".apk"
    src_apk_extension = temp_list[1]
    # output directory, named after the source file
    output_dir = 'output_' + src_apk_name + '/'
    # create the directory if it does not exist
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    print('Start generating channel packages-->' + gainDateTime())
    # iterate over the channel ids and create an apk for each channel
    for line in lines:
        # current channel id; strip() because lines read from the channel file end with \n
        target_channel = line.strip()
        # build the apk name for this channel
        target_apk = output_dir + src_apk_name + "_" + target_channel + src_apk_extension
        # log progress
        print('Producing channel package ' + gainDateTime() + '-->' + target_apk)
        # copy the source apk to create the new one
        shutil.copy(src_apk, target_apk)
        # open the new apk as a zip archive
        zipped = zipfile.ZipFile(target_apk, 'a', zipfile.ZIP_DEFLATED)
        # build the channel information entry name
        empty_channel_file = "META-INF/ChannelName_{channel}".format(channel=target_channel)
        # write the channel information
        zipped.write(src_empty_file, empty_channel_file)
        # close the zip stream
        zipped.close()
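# --- Added companion sketch; not part of the original build script. ---
# Reading the channel id back out of a generated apk, based on the
# "META-INF/ChannelName_<channel>" entry written above (the apk path passed
# in is an assumed example):
def read_channel(apk_path):
    prefix = "META-INF/ChannelName_"
    with zipfile.ZipFile(apk_path) as apk:
        for entry in apk.namelist():
            if entry.startswith(prefix):
                return entry[len(prefix):]
    return None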
928 | py | Python | filibuster/logger/__init__.py | filibuster-testing/filibuster | ["Apache-2.0"] | stars: 14 | issues: 2 | forks: null
import os
import sys
class BColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def error(string):
print(BColors.FAIL + "[FILIBUSTER] [FAIL]: " + string + BColors.ENDC, file=sys.stderr, flush=True)
def warning(string):
print(BColors.WARNING + "[FILIBUSTER] [WARNING]: " + string + BColors.ENDC, file=sys.stderr, flush=True)
def notice(string):
print(BColors.OKGREEN + "[FILIBUSTER] [NOTICE]: " + string + BColors.ENDC, file=sys.stderr, flush=True)
def info(string):
print(BColors.OKBLUE + "[FILIBUSTER] [INFO]: " + string + BColors.ENDC, file=sys.stderr, flush=True)
def debug(string):
if os.environ.get("DEBUG", ""):
print(BColors.OKCYAN + "[FILIBUSTER] [DEBUG]: " + string + BColors.ENDC, file=sys.stderr, flush=True)
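# --- Added usage note; not part of the original module. ---
# Typical calls against the helpers defined above; debug() output only
# appears when the DEBUG environment variable is set (see its check above).
if __name__ == "__main__":
    notice("starting up")
    warning("this goes to stderr with a WARNING tag")
    debug("only visible when DEBUG is set in the environment")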
445 | py | Python | djangocon_2020/site/templatetags/markdown_extras.py | thibaudcolas/2021.djangocon.eu | ["MIT"] | stars: 4 | issues: 14 | forks: 6
from django import template
from django.template.defaultfilters import stringfilter
import markdown as md
register = template.Library()
@register.filter()
@stringfilter
def markdown(value):
r = {}
    with open(value, 'r') as source_file:
        f = source_file.read()
m = md.Markdown(extensions = [
'extra',
'nl2br',
'sane_lists',
'meta',
'toc',
])
r['html'] = m.convert(f)
r['meta'] = m.Meta
return r
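# --- Added template usage sketch; not part of the original module. ---
# The filter takes a path to a markdown file and returns a dict with "html"
# and "meta" keys, so a template would use it roughly like this (the file
# path below is an assumed example):
#   {% load markdown_extras %}
#   {% with page="content/about.md"|markdown %}{{ page.html|safe }}{% endwith %}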
47,982 | py | Python | src/quadruped/quadruped/RL/gymVrka/gym_vrka/envs/vrka.py | BitsRobocon/AutonomousQuadruped | ["Apache-2.0"] | stars: null | issues: null | forks: null
"""
CODE BASED ON EXAMPLE FROM:
@misc{coumans2017pybullet,
title={Pybullet, a python module for physics simulation in robotics, games and machine learning},
author={Coumans, Erwin and Bai, Yunfei},
url={www.pybullet.org},
year={2017},
}
Example: minitaur.py
https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/bullet/minitaur.py
"""
import collections
import copy
import math
import re
import numpy as np
from . import motor
from src.quadruped.quadruped import pybullet_data
print(pybullet_data.getDataPath())
# from .bezier import legIK as IK
# from .bezier import BezierPoints
from .Vrkakinematics import VrkaModel
from . import LieAlgebra as LA
INIT_POSITION = [0, 0, 0.25]
INIT_RACK_POSITION = [0, 0, 1]
# NOTE: URDF IS FACING THE WRONG WAY
# TEMP FIX
INIT_ORIENTATION = [0, 0, 0, 1]
OVERHEAT_SHUTDOWN_TORQUE = 2.45
OVERHEAT_SHUTDOWN_TIME = 1.0
# -math.pi / 5
INIT_LEG_POS = -0.658319
# math.pi / 3
INIT_FOOT_POS = 1.0472
OLD_LEG_POSITION = ["front_left", "front_right", "rear_left", "rear_right"]
OLD_MOTOR_NAMES = [
"motor_front_left_shoulder", "motor_front_left_leg",
"foot_motor_front_left", "motor_front_right_shoulder",
"motor_front_right_leg", "foot_motor_front_right",
"motor_rear_left_shoulder", "motor_rear_left_leg", "foot_motor_rear_left",
"motor_rear_right_shoulder", "motor_rear_right_leg",
"foot_motor_rear_right"
]
OLD_MOTOR_LIMITS_BY_NAME = {}
for name in OLD_MOTOR_NAMES:
if "shoulder" in name:
OLD_MOTOR_LIMITS_BY_NAME[name] = [-1.04, 1.04]
elif "leg" in name:
OLD_MOTOR_LIMITS_BY_NAME[name] = [-2.59, 1.571]
elif "foot" in name:
OLD_MOTOR_LIMITS_BY_NAME[name] = [-1.571, 2.9]
OLD_FOOT_NAMES = [
"front_left_toe", "front_right_toe", "rear_left_toe", "rear_right_toe"
]
LEG_POSITION = ["front_left", "front_right", "back_left", "back_right"]
MOTOR_NAMES = [
"motor_front_left_hip", "motor_front_left_upper_leg",
"motor_front_left_lower_leg", "motor_front_right_hip",
"motor_front_right_upper_leg", "motor_front_right_lower_leg",
"motor_back_left_hip", "motor_back_left_upper_leg",
"motor_back_left_lower_leg", "motor_back_right_hip",
"motor_back_right_upper_leg", "motor_back_right_lower_leg"
]
MOTOR_LIMITS_BY_NAME = {}
for name in MOTOR_NAMES:
if "hip" in name:
MOTOR_LIMITS_BY_NAME[name] = [-1.04, 1.04]
elif "upper_leg" in name:
MOTOR_LIMITS_BY_NAME[name] = [-1.571, 2.59]
elif "lower_leg" in name:
MOTOR_LIMITS_BY_NAME[name] = [-2.9, 1.671]
FOOT_NAMES = [
"front_left_leg_foot", "front_right_leg_foot", "back_left_leg_foot",
"back_right_leg_foot"
]
_CHASSIS_NAME_PATTERN = re.compile(r"chassis\D*")
_MOTOR_NAME_PATTERN = re.compile(r"motor\D*")
_FOOT_NAME_PATTERN = re.compile(r"foot\D*")
SENSOR_NOISE_STDDEV = (0.0, 0.0, 0.0, 0.0, 0.0)
TWO_PI = 2 * math.pi
def MapToMinusPiToPi(angles):
"""Maps a list of angles to [-pi, pi].
Args:
angles: A list of angles in rad.
Returns:
A list of angle mapped to [-pi, pi].
"""
mapped_angles = copy.deepcopy(angles)
for i in range(len(angles)):
mapped_angles[i] = math.fmod(angles[i], TWO_PI)
if mapped_angles[i] >= math.pi:
mapped_angles[i] -= TWO_PI
elif mapped_angles[i] < -math.pi:
mapped_angles[i] += TWO_PI
return mapped_angles
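# Added worked example for the wrap above: MapToMinusPiToPi([1.5 * math.pi])
# returns approximately [-0.5 * math.pi], because values >= pi get 2*pi
# subtracted; likewise -1.5*pi maps to +0.5*pi.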
class Vrka(object):
"""The vrka class that simulates a quadruped robot.
"""
INIT_POSES = {
'stand':
np.array([
0.15192765, 0.7552236, -1.5104472, -0.15192765, 0.7552236,
-1.5104472, 0.15192765, 0.7552236, -1.5104472, -0.15192765,
0.7552236, -1.5104472
]),
'liedown':
np.array([-0.4, -1.5, 6, 0.4, -1.5, 6, -0.4, -1.5, 6, 0.4, -1.5, 6]),
'zero':
np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
}
def __init__(self,
pybullet_client,
urdf_root=pybullet_data.getDataPath(),
time_step=0.01,
action_repeat=1,
self_collision_enabled=False,
motor_velocity_limit=9.7,
pd_control_enabled=False,
accurate_motor_model_enabled=False,
remove_default_joint_damping=False,
max_force=100.0,
motor_kp=1.0,
motor_kd=0.02,
pd_latency=0.0,
control_latency=0.0,
observation_noise_stdev=SENSOR_NOISE_STDDEV,
torque_control_enabled=False,
motor_overheat_protection=False,
on_rack=False,
kd_for_pd_controllers=0.3,
pose_id='stand',
np_random=np.random,
contacts=True):
"""Constructs a vrka and reset it to the initial states.
Args:
pybullet_client: The instance of BulletClient to manage different
simulations.
urdf_root: The path to the urdf folder.
time_step: The time step of the simulation.
action_repeat: The number of ApplyAction() for each control step.
self_collision_enabled: Whether to enable self collision.
motor_velocity_limit: The upper limit of the motor velocity.
pd_control_enabled: Whether to use PD control for the motors.
accurate_motor_model_enabled: Whether to use the accurate DC motor model.
remove_default_joint_damping: Whether to remove the default joint damping.
motor_kp: proportional gain for the accurate motor model.
motor_kd: derivative gain for the accurate motor model.
pd_latency: The latency of the observations (in seconds) used to calculate
PD control. On the real hardware, it is the latency between the
microcontroller and the motor controller.
control_latency: The latency of the observations (in second) used to
calculate action. On the real hardware, it is the latency from the motor
controller, the microcontroller to the host (Nvidia TX2).
observation_noise_stdev: The standard deviation of a Gaussian noise model
for the sensor. It should be an array for separate sensors in the
following order [motor_angle, motor_velocity, motor_torque,
base_roll_pitch_yaw, base_angular_velocity]
torque_control_enabled: Whether to use the torque control, if set to
False, pose control will be used.
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in vrka.py for more
details.
on_rack: Whether to place the vrka on rack. This is only used to debug
the walking gait. In this mode, the vrka's base is hanged midair so
that its walking gait is clearer to visualize.
"""
# vrka MODEL
self.vrka = VrkaModel()
# Whether to include contact sensing
self.contacts = contacts
# Control Inputs
self.StepLength = 0.0
self.StepVelocity = 0.0
self.LateralFraction = 0.0
self.YawRate = 0.0
# Leg Phases
self.LegPhases = [0.0, 0.0, 0.0, 0.0]
# used to calculate minitaur acceleration
self.init_leg = INIT_LEG_POS
self.init_foot = INIT_FOOT_POS
self.prev_ang_twist = np.array([0, 0, 0])
self.prev_lin_twist = np.array([0, 0, 0])
self.prev_lin_acc = np.array([0, 0, 0])
self.num_motors = 12
self.num_legs = int(self.num_motors / 3)
self._pybullet_client = pybullet_client
self._action_repeat = action_repeat
self._urdf_root = urdf_root
self._self_collision_enabled = self_collision_enabled
self._motor_velocity_limit = motor_velocity_limit
self._pd_control_enabled = pd_control_enabled
self._motor_direction = np.ones(self.num_motors)
self._observed_motor_torques = np.zeros(self.num_motors)
self._applied_motor_torques = np.zeros(self.num_motors)
self._max_force = max_force
self._pd_latency = pd_latency
self._control_latency = control_latency
self._observation_noise_stdev = observation_noise_stdev
self._accurate_motor_model_enabled = accurate_motor_model_enabled
self._remove_default_joint_damping = remove_default_joint_damping
self._observation_history = collections.deque(maxlen=100)
self._control_observation = []
self._chassis_link_ids = [-1]
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
self._torque_control_enabled = torque_control_enabled
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._pose_id = pose_id
self.np_random = np_random
if self._accurate_motor_model_enabled:
self._kp = motor_kp
self._kd = motor_kd
self._motor_model = motor.MotorModel(
torque_control_enabled=self._torque_control_enabled,
kp=self._kp,
kd=self._kd)
elif self._pd_control_enabled:
self._kp = 8
self._kd = kd_for_pd_controllers
else:
self._kp = 1
self._kd = 1
self.time_step = time_step
self._step_counter = 0
# reset_time=-1.0 means skipping the reset motion.
# See Reset for more details.
self.Reset(reset_time=-1)
self.init_on_rack_position = INIT_RACK_POSITION
self.init_position = INIT_POSITION
self.initial_pose = self.INIT_POSES[pose_id]
def _RecordMassInfoFromURDF(self):
self._base_mass_urdf = []
for chassis_id in self._chassis_link_ids:
self._base_mass_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped,
chassis_id)[0])
self._leg_masses_urdf = []
for leg_id in self._leg_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped,
leg_id)[0])
for motor_id in self._motor_link_ids:
self._leg_masses_urdf.append(
self._pybullet_client.getDynamicsInfo(self.quadruped,
motor_id)[0])
def GetBaseMassFromURDF(self):
"""Get the mass of the base from the URDF file."""
return self._base_mass_urdf
def SetBaseMass(self, base_mass):
for i in range(len(self._chassis_link_ids)):
self._pybullet_client.changeDynamics(self.quadruped,
self._chassis_link_ids[i],
mass=base_mass[i])
def _RecordInertiaInfoFromURDF(self):
"""Record the inertia of each body from URDF file."""
self._link_urdf = []
num_bodies = self._pybullet_client.getNumJoints(self.quadruped)
for body_id in range(-1, num_bodies): # -1 is for the base link.
inertia = self._pybullet_client.getDynamicsInfo(
self.quadruped, body_id)[2]
self._link_urdf.append(inertia)
# We need to use id+1 to index self._link_urdf because it has the base
# (index = -1) at the first element.
self._base_inertia_urdf = [
self._link_urdf[chassis_id + 1]
for chassis_id in self._chassis_link_ids
]
self._leg_inertia_urdf = [
self._link_urdf[leg_id + 1] for leg_id in self._leg_link_ids
]
self._leg_inertia_urdf.extend([
self._link_urdf[motor_id + 1] for motor_id in self._motor_link_ids
])
def _BuildJointNameToIdDict(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._joint_name_to_id = {}
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._joint_name_to_id[joint_info[1].decode(
"UTF-8")] = joint_info[0]
def _BuildMotorIdList(self):
self._motor_id_list = [
self._joint_name_to_id[motor_name] for motor_name in MOTOR_NAMES
]
def _BuildFootIdList(self):
self._foot_id_list = [
self._joint_name_to_id[foot_name] for foot_name in FOOT_NAMES
]
print(self._foot_id_list)
def _BuildUrdfIds(self):
"""Build the link Ids from its name in the URDF file."""
c = []
m = []
f = []
lg = []
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
self._chassis_link_ids = [-1]
# the self._leg_link_ids include both the upper and lower links of the leg.
self._leg_link_ids = []
self._motor_link_ids = []
self._foot_link_ids = []
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
joint_name = joint_info[1].decode("UTF-8")
joint_id = self._joint_name_to_id[joint_name]
if _CHASSIS_NAME_PATTERN.match(joint_name):
c.append(joint_name)
self._chassis_link_ids.append(joint_id)
elif _MOTOR_NAME_PATTERN.match(joint_name):
m.append(joint_name)
self._motor_link_ids.append(joint_id)
elif _FOOT_NAME_PATTERN.match(joint_name):
f.append(joint_name)
self._foot_link_ids.append(joint_id)
else:
lg.append(joint_name)
self._leg_link_ids.append(joint_id)
self._leg_link_ids.extend(self._foot_link_ids)
self._chassis_link_ids.sort()
self._motor_link_ids.sort()
self._foot_link_ids.sort()
self._leg_link_ids.sort()
def Reset(self,
reload_urdf=True,
default_motor_angles=None,
reset_time=3.0):
"""Reset the vrka to its initial states.
Args:
reload_urdf: Whether to reload the urdf file. If not, Reset() just place
the vrka back to its starting position.
default_motor_angles: The default motor angles. If it is None, vrka
will hold a default pose for 100 steps. In
torque control mode, the phase of holding the default pose is skipped.
reset_time: The duration (in seconds) to hold the default motor angles. If
reset_time <= 0 or in torque control mode, the phase of holding the
default pose is skipped.
"""
if self._on_rack:
init_position = INIT_RACK_POSITION
else:
init_position = INIT_POSITION
if reload_urdf:
if self._self_collision_enabled:
self.quadruped = self._pybullet_client.loadURDF(
pybullet_data.getDataPath() + "/vrka.urdf",
init_position,
useFixedBase=self._on_rack,
flags=self._pybullet_client.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT)
else:
self.quadruped = self._pybullet_client.loadURDF(
pybullet_data.getDataPath() + "/vrka.urdf",
init_position,
INIT_ORIENTATION,
useFixedBase=self._on_rack)
self._BuildJointNameToIdDict()
self._BuildUrdfIds()
if self._remove_default_joint_damping:
self._RemoveDefaultJointDamping()
self._BuildMotorIdList()
self._BuildFootIdList()
self._RecordMassInfoFromURDF()
self._RecordInertiaInfoFromURDF()
self.ResetPose(add_constraint=True)
else:
self._pybullet_client.resetBasePositionAndOrientation(
self.quadruped, init_position, INIT_ORIENTATION)
self._pybullet_client.resetBaseVelocity(self.quadruped, [0, 0, 0],
[0, 0, 0])
# self._pybullet_client.changeDynamics(self.quadruped, -1, lateralFriction=0.8)
self.ResetPose(add_constraint=False)
self._overheat_counter = np.zeros(self.num_motors)
self._motor_enabled_list = [True] * self.num_motors
self._step_counter = 0
# Perform reset motion within reset_duration if in position control mode.
# Nothing is performed if in torque control mode for now.
self._observation_history.clear()
if reset_time > 0.0 and default_motor_angles is not None:
self.RealisticObservation()
for _ in range(100):
self.ApplyAction(self.initial_pose)
self._pybullet_client.stepSimulation()
self.RealisticObservation()
num_steps_to_reset = int(reset_time / self.time_step)
for _ in range(num_steps_to_reset):
self.ApplyAction(default_motor_angles)
self._pybullet_client.stepSimulation()
self.RealisticObservation()
self.RealisticObservation()
# Set Foot Friction
self.SetFootFriction()
def _RemoveDefaultJointDamping(self):
num_joints = self._pybullet_client.getNumJoints(self.quadruped)
for i in range(num_joints):
joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
self._pybullet_client.changeDynamics(joint_info[0],
-1,
linearDamping=0,
angularDamping=0)
def _SetMotorTorqueById(self, motor_id, torque):
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=motor_id,
controlMode=self._pybullet_client.TORQUE_CONTROL,
force=torque)
def _SetDesiredMotorAngleById(self, motor_id, desired_angle):
if self._pd_control_enabled or self._accurate_motor_model_enabled:
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=motor_id,
controlMode=self._pybullet_client.POSITION_CONTROL,
targetPosition=desired_angle,
positionGain=self._kp,
velocityGain=self._kd,
force=self._max_force)
# Pybullet has a 'perfect' joint controller with its default p,d
else:
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=motor_id,
controlMode=self._pybullet_client.POSITION_CONTROL,
targetPosition=desired_angle)
def _SetDesiredMotorAngleByName(self, motor_name, desired_angle):
self._SetDesiredMotorAngleById(self._joint_name_to_id[motor_name],
desired_angle)
def ResetPose(self, add_constraint):
"""Reset the pose of the vrka.
Args:
add_constraint: Whether to add a constraint at the joints of two feet.
"""
for i in range(self.num_legs):
self._ResetPoseForLeg(i, add_constraint)
def _ResetPoseForLeg(self, leg_id, add_constraint):
"""Reset the initial pose for the leg.
Args:
leg_id: It should be 0, 1, 2, or 3, which represents the leg at
front_left, back_left, front_right and back_right.
add_constraint: Whether to add a constraint at the joints of two feet.
"""
knee_friction_force = 0
pi = math.pi
leg_position = LEG_POSITION[leg_id]
self._pybullet_client.resetJointState(
self.quadruped,
self._joint_name_to_id["motor_" + leg_position + "_hip"],
self.INIT_POSES[self._pose_id][3 * leg_id],
targetVelocity=0)
self._pybullet_client.resetJointState(
self.quadruped,
self._joint_name_to_id["motor_" + leg_position + "_upper_leg"],
self.INIT_POSES[self._pose_id][3 * leg_id + 1],
targetVelocity=0)
self._pybullet_client.resetJointState(
self.quadruped,
self._joint_name_to_id["motor_" + leg_position + "_lower_leg"],
self.INIT_POSES[self._pose_id][3 * leg_id + 2],
targetVelocity=0)
if self._accurate_motor_model_enabled or self._pd_control_enabled:
# Disable the default motor in pybullet.
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["motor_" + leg_position +
"_hip"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["motor_" + leg_position +
"_upper_leg"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["motor_" + leg_position +
"_lower_leg"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
def GetBasePosition(self):
"""Get the position of vrka's base.
Returns:
The position of vrka's base.
"""
position, _ = (self._pybullet_client.getBasePositionAndOrientation(
self.quadruped))
return position
def GetBaseOrientation(self):
"""Get the orientation of vrka's base, represented as quaternion.
Returns:
The orientation of vrka's base.
"""
_, orientation = (self._pybullet_client.getBasePositionAndOrientation(
self.quadruped))
return orientation
def GetBaseRollPitchYaw(self):
"""Get the rate of orientation change of the vrka's base in euler angle.
Returns:
rate of (roll, pitch, yaw) change of the vrka's base.
"""
vel = self._pybullet_client.getBaseVelocity(self.quadruped)
return np.asarray([vel[1][0], vel[1][1], vel[1][2]])
def GetBaseRollPitchYawRate(self):
"""Get the rate of orientation change of the vrka's base in euler angle.
        This function mimics the noisy sensor reading and adds latency.
Returns:
rate of (roll, pitch, yaw) change of the vrka's base polluted by noise
and latency.
"""
return self._AddSensorNoise(
np.array(self._control_observation[3 * self.num_motors +
4:3 * self.num_motors + 7]),
self._observation_noise_stdev[4])
def GetBaseTwist(self):
"""Get the Twist of minitaur's base.
Returns:
The Twist of the minitaur's base.
"""
return self._pybullet_client.getBaseVelocity(self.quadruped)
def GetActionDimension(self):
"""Get the length of the action list.
Returns:
The length of the action list.
"""
return self.num_motors
def GetObservationUpperBound(self):
"""Get the upper bound of the observation.
Returns:
The upper bound of an observation. See GetObservation() for the details
of each element of an observation.
NOTE: Changed just like GetObservation()
"""
upper_bound = np.array([0.0] * self.GetObservationDimension())
# roll, pitch
upper_bound[0:2] = 2.0 * np.pi
# acc, rate in x,y,z
upper_bound[2:8] = np.inf
# Leg Phases
upper_bound[8:12] = 2.0
# Contacts
if self.contacts:
upper_bound[12:] = 1.0
return upper_bound
def GetObservationLowerBound(self):
"""Get the lower bound of the observation."""
return -self.GetObservationUpperBound()
def GetObservationDimension(self):
"""Get the length of the observation list.
Returns:
The length of the observation list.
"""
return len(self.GetObservation())
def GetObservation(self):
"""Get the observations of minitaur.
It includes the angles, velocities, torques and the orientation of the base.
Returns:
The observation list. observation[0:8] are motor angles. observation[8:16]
are motor velocities, observation[16:24] are motor torques.
observation[24:28] is the orientation of the base, in quaternion form.
NOTE: DIVERGES FROM STOCK MINITAUR ENV. WILL LEAVE ORIGINAL COMMENTED
For my purpose, the observation space includes Roll and Pitch, as well as
acceleration and gyroscopic rate along the x,y,z axes. All of this
information can be collected from an onboard IMU. The reward function
will contain a hidden velocity reward (fwd, bwd) which cannot be measured
and so is not included. For spinning, the gyroscopic z rate will be used
as the (explicit) velocity reward.
This version operates without motor torques, angles and velocities. Erwin
Coumans' paper suggests a sparse observation space leads to higher reward
# NOTE: use True version for perfect data, or other for realistic data
"""
observation = []
# GETTING TWIST IN BODY FRAME
pos = self.GetBasePosition()
orn = self.GetBaseOrientation()
roll, pitch, yaw = self._pybullet_client.getEulerFromQuaternion(
[orn[0], orn[1], orn[2], orn[3]])
# rpy = LA.RPY(roll, pitch, yaw)
# R, _ = LA.TransToRp(rpy)
# T_wb = LA.RpToTrans(R, np.array([pos[0], pos[1], pos[2]]))
# T_bw = LA.TransInv(T_wb)
# Adj_Tbw = LA.Adjoint(T_bw)
# Get Linear and Angular Twist in WORLD FRAME
lin_twist, ang_twist = self.GetBaseTwist()
lin_twist = np.array([lin_twist[0], lin_twist[1], lin_twist[2]])
ang_twist = np.array([ang_twist[0], ang_twist[1], ang_twist[2]])
# Vw = np.concatenate((ang_twist, lin_twist))
# Vb = np.dot(Adj_Tbw, Vw)
# roll, pitch, _ = self._pybullet_client.getEulerFromQuaternion(
# [orn[0], orn[1], orn[2], orn[3]])
# # Get linear accelerations
# lin_twist = -Vb[3:]
# ang_twist = Vb[:3]
lin_acc = lin_twist - self.prev_lin_twist
        if not lin_acc.any():
lin_acc = self.prev_lin_acc
self.prev_lin_acc = lin_acc
# print("LIN TWIST: ", lin_twist)
self.prev_lin_twist = lin_twist
self.prev_ang_twist = ang_twist
# Get Contacts
CONTACT = list(self._pybullet_client.getContactPoints(self.quadruped))
FLC = 0
FRC = 0
BLC = 0
BRC = 0
if len(CONTACT) > 0:
for i in range(len(CONTACT)):
Contact_Link_Index = CONTACT[i][3]
if Contact_Link_Index == self._foot_id_list[0]:
FLC = 1
# print("FL CONTACT")
if Contact_Link_Index == self._foot_id_list[1]:
FRC = 1
# print("FR CONTACT")
if Contact_Link_Index == self._foot_id_list[2]:
BLC = 1
# print("BL CONTACT")
if Contact_Link_Index == self._foot_id_list[3]:
BRC = 1
# print("BR CONTACT")
# order: roll, pitch, gyro(x,y,z), acc(x, y, z)
observation.append(roll)
observation.append(pitch)
observation.extend(list(ang_twist))
observation.extend(list(lin_acc))
# Control Input
# observation.append(self.StepLength)
# observation.append(self.StepVelocity)
# observation.append(self.LateralFraction)
# observation.append(self.YawRate)
observation.extend(self.LegPhases)
if self.contacts:
observation.append(FLC)
observation.append(FRC)
observation.append(BLC)
observation.append(BRC)
# print("CONTACTS: {} {} {} {}".format(FLC, FRC, BLC, BRC))
return observation
def GetControlInput(self, controller):
""" Store Control Input as Observation
"""
_, _, StepLength, LateralFraction, YawRate, StepVelocity, _, _ = controller.return_bezier_params(
)
self.StepLength = StepLength
self.StepVelocity = StepVelocity
self.LateralFraction = LateralFraction
self.YawRate = YawRate
def GetLegPhases(self, TrajectoryGenerator):
""" Leg phases according to TG from 0->2
0->1: Stance
1->2 Swing
"""
self.LegPhases = TrajectoryGenerator.Phases
def GetExternalObservations(self, TrajectoryGenerator, controller):
""" Augment State Space
"""
self.GetControlInput(controller)
self.GetLegPhases(TrajectoryGenerator)
def ConvertFromLegModel(self, action):
# TODO
joint_angles = action
return joint_angles
def ApplyMotorLimits(self, joint_angles):
eps = 0.001
for i in range(len(joint_angles)):
LIM = MOTOR_LIMITS_BY_NAME[MOTOR_NAMES[i]]
joint_angles[i] = np.clip(joint_angles[i], LIM[0] + eps,
LIM[1] - eps)
return joint_angles
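    # Added illustration: with the limits declared in MOTOR_LIMITS_BY_NAME, a
    # command of 3.0 rad on a "lower_leg" motor is clipped to just under its
    # 1.671 rad upper bound (1.671 - eps).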
def ApplyAction(self, motor_commands):
"""Set the desired motor angles to the motors of the minitaur.
The desired motor angles are clipped based on the maximum allowed velocity.
If the pd_control_enabled is True, a torque is calculated according to
the difference between current and desired joint angle, as well as the joint
velocity. This torque is exerted to the motor. For more information about
PD control, please refer to: https://en.wikipedia.org/wiki/PID_controller.
Args:
          motor_commands: The twelve desired motor angles.
"""
# FIRST, APPLY MOTOR LIMITS:
motor_commands = self.ApplyMotorLimits(motor_commands)
if self._motor_velocity_limit < np.inf:
current_motor_angle = self.GetMotorAngles()
motor_commands_max = (current_motor_angle +
self.time_step * self._motor_velocity_limit)
motor_commands_min = (current_motor_angle -
self.time_step * self._motor_velocity_limit)
motor_commands = np.clip(motor_commands, motor_commands_min,
motor_commands_max)
if self._accurate_motor_model_enabled or self._pd_control_enabled:
q = self.GetMotorAngles()
qdot = self.GetMotorVelocities()
if self._accurate_motor_model_enabled:
actual_torque, observed_torque = self._motor_model.convert_to_torque(
motor_commands, q, qdot)
if self._motor_overheat_protection:
for i in range(self.num_motors):
if abs(actual_torque[i]) > OVERHEAT_SHUTDOWN_TORQUE:
self._overheat_counter[i] += 1
else:
self._overheat_counter[i] = 0
if (self._overheat_counter[i] >
OVERHEAT_SHUTDOWN_TIME / self.time_step):
self._motor_enabled_list[i] = False
# The torque is already in the observation space because we use
# GetMotorAngles and GetMotorVelocities.
self._observed_motor_torques = observed_torque
# Transform into the motor space when applying the torque.
self._applied_motor_torque = np.multiply(
actual_torque, self._motor_direction)
for motor_id, motor_torque, motor_enabled in zip(
self._motor_id_list, self._applied_motor_torque,
self._motor_enabled_list):
if motor_enabled:
self._SetMotorTorqueById(motor_id, motor_torque)
else:
self._SetMotorTorqueById(motor_id, 0)
else:
torque_commands = -self._kp * (
q - motor_commands) - self._kd * qdot
# The torque is already in the observation space because we use
# GetMotorAngles and GetMotorVelocities.
self._observed_motor_torques = torque_commands
# Transform into the motor space when applying the torque.
self._applied_motor_torques = np.multiply(
self._observed_motor_torques, self._motor_direction)
for motor_id, motor_torque in zip(self._motor_id_list,
self._applied_motor_torques):
self._SetMotorTorqueById(motor_id, motor_torque)
else:
motor_commands_with_direction = np.multiply(
motor_commands, self._motor_direction)
for motor_id, motor_command_with_direction in zip(
self._motor_id_list, motor_commands_with_direction):
self._SetDesiredMotorAngleById(motor_id,
motor_command_with_direction)
def Step(self, action):
for _ in range(self._action_repeat):
self.ApplyAction(action)
self._pybullet_client.stepSimulation()
self.RealisticObservation()
self._step_counter += 1
def GetTimeSinceReset(self):
return self._step_counter * self.time_step
def GetMotorAngles(self):
"""Gets the eight motor angles at the current moment, mapped to [-pi, pi].
Returns:
Motor angles, mapped to [-pi, pi].
"""
motor_angles = [
self._pybullet_client.getJointState(self.quadruped, motor_id)[0]
for motor_id in self._motor_id_list
]
motor_angles = np.multiply(motor_angles, self._motor_direction)
return MapToMinusPiToPi(motor_angles)
def GetMotorVelocities(self):
"""Get the velocity of all eight motors.
Returns:
Velocities of all eight motors.
"""
motor_velocities = [
self._pybullet_client.getJointState(self.quadruped, motor_id)[1]
for motor_id in self._motor_id_list
]
motor_velocities = np.multiply(motor_velocities, self._motor_direction)
return motor_velocities
def GetMotorTorques(self):
"""Get the amount of torque the motors are exerting.
Returns:
          Motor torques of all twelve motors.
"""
if self._accurate_motor_model_enabled or self._pd_control_enabled:
return self._observed_motor_torques
else:
motor_torques = [
self._pybullet_client.getJointState(self.quadruped,
motor_id)[3]
for motor_id in self._motor_id_list
]
motor_torques = np.multiply(motor_torques, self._motor_direction)
return motor_torques
def GetBaseMassesFromURDF(self):
"""Get the mass of the base from the URDF file."""
return self._base_mass_urdf
def GetBaseInertiasFromURDF(self):
"""Get the inertia of the base from the URDF file."""
return self._base_inertia_urdf
def GetLegMassesFromURDF(self):
"""Get the mass of the legs from the URDF file."""
return self._leg_masses_urdf
def GetLegInertiasFromURDF(self):
"""Get the inertia of the legs from the URDF file."""
return self._leg_inertia_urdf
def SetBaseMasses(self, base_mass):
"""Set the mass of vrka's base.
Args:
base_mass: A list of masses of each body link in CHASIS_LINK_IDS. The
length of this list should be the same as the length of CHASIS_LINK_IDS.
Raises:
ValueError: It is raised when the length of base_mass is not the same as
the length of self._chassis_link_ids.
"""
if len(base_mass) != len(self._chassis_link_ids):
raise ValueError(
"The length of base_mass {} and self._chassis_link_ids {} are not "
"the same.".format(len(base_mass),
len(self._chassis_link_ids)))
for chassis_id, chassis_mass in zip(self._chassis_link_ids, base_mass):
self._pybullet_client.changeDynamics(self.quadruped,
chassis_id,
mass=chassis_mass)
def SetLegMasses(self, leg_masses):
"""Set the mass of the legs.
Args:
leg_masses: The leg and motor masses for all the leg links and motors.
Raises:
ValueError: It is raised when the length of masses is not equal to number
of links + motors.
"""
if len(leg_masses) != len(self._leg_link_ids) + len(
self._motor_link_ids):
raise ValueError("The number of values passed to SetLegMasses are "
"different than number of leg links and motors.")
for leg_id, leg_mass in zip(self._leg_link_ids, leg_masses):
self._pybullet_client.changeDynamics(self.quadruped,
leg_id,
mass=leg_mass)
motor_masses = leg_masses[len(self._leg_link_ids):]
for link_id, motor_mass in zip(self._motor_link_ids, motor_masses):
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
mass=motor_mass)
def SetBaseInertias(self, base_inertias):
"""Set the inertias of vrka's base.
Args:
base_inertias: A list of inertias of each body link in CHASIS_LINK_IDS.
The length of this list should be the same as the length of
CHASIS_LINK_IDS.
Raises:
ValueError: It is raised when the length of base_inertias is not the same
as the length of self._chassis_link_ids and base_inertias contains
negative values.
"""
if len(base_inertias) != len(self._chassis_link_ids):
raise ValueError(
"The length of base_inertias {} and self._chassis_link_ids {} are "
"not the same.".format(len(base_inertias),
len(self._chassis_link_ids)))
for chassis_id, chassis_inertia in zip(self._chassis_link_ids,
base_inertias):
for inertia_value in chassis_inertia:
if (np.asarray(inertia_value) < 0).any():
raise ValueError(
"Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(
self.quadruped,
chassis_id,
localInertiaDiagonal=chassis_inertia)
def SetLegInertias(self, leg_inertias):
"""Set the inertias of the legs.
Args:
leg_inertias: The leg and motor inertias for all the leg links and motors.
Raises:
ValueError: It is raised when the length of inertias is not equal to
the number of links + motors or leg_inertias contains negative values.
"""
if len(leg_inertias) != len(self._leg_link_ids) + len(
self._motor_link_ids):
raise ValueError("The number of values passed to SetLegMasses are "
"different than number of leg links and motors.")
for leg_id, leg_inertia in zip(self._leg_link_ids, leg_inertias):
for inertia_value in leg_inertias:
if (np.asarray(inertia_value) < 0).any():
raise ValueError(
"Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(
self.quadruped, leg_id, localInertiaDiagonal=leg_inertia)
motor_inertias = leg_inertias[len(self._leg_link_ids):]
for link_id, motor_inertia in zip(self._motor_link_ids,
motor_inertias):
for inertia_value in motor_inertias:
if (np.asarray(inertia_value) < 0).any():
raise ValueError(
"Values in inertia matrix should be non-negative.")
self._pybullet_client.changeDynamics(
self.quadruped, link_id, localInertiaDiagonal=motor_inertia)
def SetFootFriction(self, foot_friction=100.0):
"""Set the lateral friction of the feet.
Args:
foot_friction: The lateral friction coefficient of the foot. This value is
shared by all four feet.
"""
for link_id in self._foot_link_ids:
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
lateralFriction=foot_friction)
# TODO(b/73748980): Add more API's to set other contact parameters.
def SetFootRestitution(self, link_id, foot_restitution=1.0):
"""Set the coefficient of restitution at the feet.
Args:
foot_restitution: The coefficient of restitution (bounciness) of the feet.
This value is shared by all four feet.
"""
self._pybullet_client.changeDynamics(self.quadruped,
link_id,
restitution=foot_restitution)
def SetJointFriction(self, joint_frictions):
for knee_joint_id, friction in zip(self._foot_link_ids,
joint_frictions):
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=knee_joint_id,
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=friction)
def GetNumKneeJoints(self):
return len(self._foot_link_ids)
def SetBatteryVoltage(self, voltage):
if self._accurate_motor_model_enabled:
self._motor_model.set_voltage(voltage)
def SetMotorViscousDamping(self, viscous_damping):
if self._accurate_motor_model_enabled:
self._motor_model.set_viscous_damping(viscous_damping)
def RealisticObservation(self):
"""Receive the observation from sensors.
This function is called once per step. The observations are only updated
when this function is called.
"""
self._observation_history.appendleft(self.GetObservation())
self._control_observation = self._GetDelayedObservation(
self._control_latency)
self._control_observation = self._AddSensorNoise(
self._control_observation, self._observation_noise_stdev)
return self._control_observation
def _GetDelayedObservation(self, latency):
"""Get observation that is delayed by the amount specified in latency.
Args:
latency: The latency (in seconds) of the delayed observation.
Returns:
observation: The observation which was actually latency seconds ago.
"""
if latency <= 0 or len(self._observation_history) == 1:
observation = self._observation_history[0]
else:
n_steps_ago = int(latency / self.time_step)
if n_steps_ago + 1 >= len(self._observation_history):
return self._observation_history[-1]
remaining_latency = latency - n_steps_ago * self.time_step
blend_alpha = remaining_latency / self.time_step
observation = (
(1.0 - blend_alpha) *
np.array(self._observation_history[n_steps_ago]) +
blend_alpha *
np.array(self._observation_history[n_steps_ago + 1]))
return observation
def _GetPDObservation(self):
pd_delayed_observation = self._GetDelayedObservation(self._pd_latency)
q = pd_delayed_observation[0:self.num_motors]
qdot = pd_delayed_observation[self.num_motors:2 * self.num_motors]
return (np.array(q), np.array(qdot))
def _AddSensorNoise(self, observation, noise_stdev):
# if self._observation_noise_stdev > 0:
# observation += (self.np_random.normal(scale=noise_stdev,
# size=observation.shape) *
# self.GetObservationUpperBound())
return observation
def SetControlLatency(self, latency):
"""Set the latency of the control loop.
It measures the duration between sending an action from Nvidia TX2 and
receiving the observation from microcontroller.
Args:
latency: The latency (in seconds) of the control loop.
"""
self._control_latency = latency
def GetControlLatency(self):
"""Get the control latency.
Returns:
The latency (in seconds) between when the motor command is sent and when
the sensor measurements are reported back to the controller.
"""
return self._control_latency
def SetMotorGains(self, kp, kd):
"""Set the gains of all motors.
These gains are PD gains for motor positional control. kp is the
proportional gain and kd is the derivative gain.
Args:
kp: proportional gain of the motors.
kd: derivative gain of the motors.
"""
self._kp = kp
self._kd = kd
if self._accurate_motor_model_enabled:
self._motor_model.set_motor_gains(kp, kd)
def GetMotorGains(self):
"""Get the gains of the motor.
Returns:
The proportional gain.
The derivative gain.
"""
return self._kp, self._kd
def SetMotorStrengthRatio(self, ratio):
"""Set the strength of all motors relative to the default value.
Args:
ratio: The relative strength. A scalar range from 0.0 to 1.0.
"""
if self._accurate_motor_model_enabled:
self._motor_model.set_strength_ratios([ratio] * self.num_motors)
def SetMotorStrengthRatios(self, ratios):
"""Set the strength of each motor relative to the default value.
Args:
ratios: The relative strength. A numpy array ranging from 0.0 to 1.0.
"""
if self._accurate_motor_model_enabled:
self._motor_model.set_strength_ratios(ratios)
def SetTimeSteps(self, action_repeat, simulation_step):
"""Set the time steps of the control and simulation.
Args:
action_repeat: The number of simulation steps that the same action is
repeated.
simulation_step: The simulation time step.
"""
self.time_step = simulation_step
self._action_repeat = action_repeat
@property
def chassis_link_ids(self):
        return self._chassis_link_ids
4a2776c9d6a8bafe28dd7d343285d29019467a3d | 3,909 | py | Python | src/gesture_api.py | ShikharJ/Gestures-Alive | 2a8c8a36400c3a7778c00fd76159a64f3a49a1e9 | [
"BSD-3-Clause"
] | 4 | 2017-12-28T20:26:51.000Z | 2021-08-06T11:57:41.000Z | src/gesture_api.py | ShikharJ/Gestures-Alive | 2a8c8a36400c3a7778c00fd76159a64f3a49a1e9 | [
"BSD-3-Clause"
] | null | null | null | src/gesture_api.py | ShikharJ/Gestures-Alive | 2a8c8a36400c3a7778c00fd76159a64f3a49a1e9 | [
"BSD-3-Clause"
] | 3 | 2019-02-22T18:52:14.000Z | 2019-08-23T06:31:45.000Z | import numpy
import math
class Gesture(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
def set_palm(self, hand_center, hand_radius):
self.hand_center = hand_center
self.hand_radius = hand_radius
def set_finger_position(self, finger_position):
self.finger_position = finger_position
self.finger_count = len(finger_position)
def calculate_angles(self):
self.angle = numpy.zeros(self.finger_count, dtype=int)
for i in range(self.finger_count):
x = self.finger_position[i][0]
y = self.finger_position[i][1]
self.angle[i] = abs(math.atan2((self.hand_center[1] - y), (x - self.hand_center[0])) * 180 / math.pi)
def define_gestures():
dictionary = {}
v = Gesture("V Sign")
v.set_palm((475, 225), 45)
v.set_finger_position([(490, 90), (415, 105)])
v.calculate_angles()
dictionary[v.get_name()] = v
l_right = Gesture("L Sign")
l_right.set_palm((475, 225), 50)
l_right.set_finger_position([(450, 62), (345, 200)])
l_right.calculate_angles()
dictionary[l_right.get_name()] = l_right
index_pointing = Gesture("Index Pointing")
index_pointing.set_palm((480, 230), 43)
index_pointing.set_finger_position([(475, 102)])
index_pointing.calculate_angles()
dictionary[index_pointing.get_name()] = index_pointing
return dictionary
def compare_gestures(primary, secondary):
if primary.finger_count == secondary.finger_count:
if primary.finger_count == 1:
angle_difference = primary.angle[0] - secondary.angle[0]
if angle_difference > 20:
result = 0
else:
primary_length = numpy.sqrt((primary.finger_position[0][0] - primary.hand_center[0]) ** 2
+ (primary.finger_position[0][1] - primary.hand_center[1]) ** 2)
secondary_length = numpy.sqrt((secondary.finger_position[0][0] - secondary.hand_center[0]) ** 2
+ (secondary.finger_position[0][1] - secondary.hand_center[1]) ** 2)
length_difference = primary_length / secondary_length
radius_difference = primary.hand_radius / secondary.hand_radius
length_score = abs(length_difference - radius_difference)
if length_score < 0.09:
result = secondary.get_name()
else:
result = 0
else:
angle_difference = []
for i in range(primary.finger_count):
angle_difference.append(primary.angle[i] - secondary.angle[i])
angle_score = max(angle_difference) - min(angle_difference)
if angle_score < 15:
length_difference = []
for i in range(primary.finger_count):
primary_length = numpy.sqrt((primary.finger_position[i][0] - primary.hand_center[0]) ** 2 + (primary.finger_position[i][1] - primary.hand_center[1]) ** 2)
secondary_length = numpy.sqrt((secondary.finger_position[i][0] - secondary.hand_center[0]) ** 2 + (secondary.finger_position[i][1] - secondary.hand_center[1]) ** 2)
length_difference.append(primary_length / secondary_length)
length_score = max(length_difference) - min(length_difference)
if length_score < 0.06:
result = secondary.get_name()
else:
result = 0
else:
result = 0
else:
result = 0
return result
def decide_gesture(source, gesture_dictionary):
for k in gesture_dictionary.keys():
result = compare_gestures(source, gesture_dictionary[k])
if result != 0:
return result
return "None"
2,700 | py | Python | imu_test.py | anacsousa1/footLifter | ["Apache-2.0"] | stars: null | issues: null | forks: null
# ######################################################################################################################
# ## imu_test.py
# ## Description: testing the IMU
# ## - We test the connection and print the pitch angle.
# ## Libraries needed: imu, time, math, serial, sys
# ## Python interpreter: Anaconda 2.2.0 (python 2.7)
# ## Author: Ana Carolina Cardoso de Sousa
# ## Email: [email protected]
# ## Created: May 29th 2015
# ######################################################################################################################
__authors__ = [
"\"Ana de Sousa\" <[email protected]>",
]
# Importing...
import time
import math
import serial
import imu
import sys
# Greetings
print "Welcome to ours IMU tester, let\'s get this started?\n"
# Ports and addresses
portIMU = 'COM9' # in windows, verify "Manage Devices"
addressIMU = 1 # the device must have a stick informing it
# Open ports
print '\tWe are trying to connect to the IMU (address ' + str(addressIMU) + ') to port ' + portIMU + '.'
try:
serialPortIMU = serial.Serial(portIMU, timeout=1, writeTimeout=1, baudrate=115200)
except serial.SerialException:
print '\t\tNo Hardware Found in ' + portIMU + '... :(\n \t\tExiting now. \n'
sys.exit(0)
if not serialPortIMU.isOpen(): # verify if it is already open
serialPortIMU.open()
device1 = imu.IMU(serialPortIMU, addressIMU) # Construct object
testing = device1.getEulerAngles() # Get some info
testing = testing.split(',', 6) # Convert to list
if len(testing) == 2: # testing connection
print '\t\tUnable to connect to the IMU... :(\n \t\tExiting now. \n'
sys.exit(1)
# Calibrating
print '\t\tWe are connected! Now, we are going to calibrate the IMU. Keep it still!\n'
device1.calibrate()
device1.tare()
print "\t\t\tIMU Calibrated!\n"
# Wait until the user press the 'Start' button
print '\n\t\tWhenever you\'re ready, press button 1 (the left one)!'
while not (device1.checkButtons() == 1):
pass
# Do it while 'Stop' button not pressed
dt = 0.5
print '\nPrinting pitch angle (sample time ' + str(dt) + ' seconds).'
while not (device1.checkButtons() == 2):
angles = device1.getEulerAngles() # get angles
angles = angles.split(',', 6) # convert to list
if len(angles) == 6: # if we connect correctly with the device
pitch = float(angles[4])
if pitch >= 0:
pitch = math.degrees(pitch)
else:
pitch = 360 + math.degrees(pitch)
print str(pitch)
time.sleep(dt)
# Bye bye
serialPortIMU.close() # close port
print 'Have a nice day!'
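# --- Added note on the angle convention used in the loop above. ---
# Pitch is reported in [0, 360) degrees: for example a raw reading of
# -0.1 rad becomes 360 + math.degrees(-0.1), i.e. about 354.3 degrees.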
1,523 | py | Python | stonesoup/feeder/filter.py | dlast-dstl/Stone-Soup | ["MIT"] | stars: 1 | issues: null | forks: null
# -*- coding: utf-8 -*-
from operator import attrgetter
from ..base import Property
from .base import Feeder
class MetadataReducer(Feeder):
"""Reduce detections so unique metadata value present at each time step.
This allows to reduce detections so a single detection is returned, based
on a particular metadata value, for example a unique identity. The most
recent detection will be yielded for each unique metadata value at each
time step.
"""
metadata_field = Property(
str,
doc="Field used to reduce unique set of detections")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._detections = set()
@property
def detections(self):
return self._detections
def detections_gen(self):
for time, detections in self.detector.detections_gen():
unique_detections = set()
sorted_detections = sorted(
detections, key=attrgetter('timestamp'), reverse=True)
meta_values = set()
for detection in sorted_detections:
meta_value = detection.metadata.get(self.metadata_field)
if meta_value not in meta_values:
unique_detections.add(detection)
# Ignore those without meta data value
if meta_value is not None:
meta_values.add(meta_value)
self._detections = unique_detections
yield time, unique_detections
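# Illustrative usage sketch (added for clarity; the detector and the 'mmsi'
# metadata field below are hypothetical, not part of this module):
#
# feeder = MetadataReducer(detector, metadata_field='mmsi')
# for timestamp, detections in feeder.detections_gen():
# process(detections) # at most one detection per unique 'mmsi' value per time step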
| 34.613636 | 77 | 0.633618 |
4a27787bd23387a673e48299f1c4b348941da986 | 34,525 | py | Python | docs/3.0.0/_downloads/tutorial.py | sadielbartholomew/cf-python | 98541d8e55c703eca9bfba4168fb3d42755267da | [
"MIT"
] | 49 | 2019-10-08T12:46:39.000Z | 2022-02-06T01:13:30.000Z | docs/3.0.0/_downloads/tutorial.py | sadielbartholomew/cf-python | 98541d8e55c703eca9bfba4168fb3d42755267da | [
"MIT"
] | 279 | 2019-10-07T11:14:37.000Z | 2022-03-31T13:46:37.000Z | docs/3.0.0/_downloads/tutorial.py | sadielbartholomew/cf-python | 98541d8e55c703eca9bfba4168fb3d42755267da | [
"MIT"
] | 14 | 2019-10-25T08:54:06.000Z | 2022-01-31T10:42:39.000Z |
print("\n**Tutorial**\n")
print("\n**Sample datasets**\n")
print("\n**Import**\n")
import cf
cf.CF()
print("\n**Field construct**\n")
print("\n**Reading field constructs from datasets**\n")
x = cf.read('file.nc')
type(x)
len(x)
y = cf.read('*.nc')
len(y)
z = cf.read(['file.nc', 'precipitation_flux.nc'])
len(z)
try:
y = cf.read('$PWD')
except:
pass
else:
raise Exception("This should have failed!")
y = cf.read('$PWD', ignore_read_error=True)
len(y)
print("\n**Inspection**\n")
x = cf.read('file.nc')
x
q = x[0]
t = x[1]
q
print(q)
print(t)
q.dump()
t.dump()
print("\n**Visualization**\n")
print("\n**Field lists**\n")
x = cf.read('file.nc')
y = cf.read('precipitation_flux.nc')
x
y
y.extend(x)
y
y[2]
y[::-1]
len(y)
len(y + y)
len(y * 4)
for f in y:
print('field:', repr(f))
print("\n**Properties**\n")
q, t = cf.read('file.nc')
t.properties()
t.has_property('standard_name')
t.get_property('standard_name')
t.del_property('standard_name')
t.get_property('standard_name', default='not set')
t.set_property('standard_name', value='air_temperature')
t.get_property('standard_name', default='not set')
original = t.properties()
original
t.set_properties({'foo': 'bar', 'units': 'K'})
t.properties()
t.clear_properties()
t.properties()
t.set_properties(original)
t.properties()
t.identity()
t.identities()
print("\n**Metadata constructs**\n")
q, t = cf.read('file.nc')
t.coordinate_references
print(t.coordinate_references)
list(t.coordinate_references.keys())
for key, value in t.coordinate_references.items():
print(key, repr(value))
print(t.dimension_coordinates)
print(t.domain_axes)
q.constructs
print(q.constructs)
t.constructs
print(t.constructs)
print("\n**Data**\n")
q, t = cf.read('file.nc')
t.data
print(t.array)
t.dtype
t.ndim
t.shape
t.size
print(t.domain_axes)
t
t.data.shape
t.get_data_axes()
data = t.del_data()
t.has_data()
t.set_data(data)
t.data
d = cf.Data([1, 2, 3], units='days since 2004-2-28')
print(d.array)
print(d.datetime_array)
e = cf.Data([1, 2, 3], units='days since 2004-2-28', calendar='360_day')
print(d.array)
print(d.datetime_array)
date_time = cf.dt(2004, 2, 29)
date_time
d = cf.Data(date_time, calendar='gregorian')
print(d.array)
d.datetime_array
date_times = cf.dt_vector(['2004-02-29', '2004-02-30', '2004-03-01'], calendar='360_day')
print (date_times)
e = cf.Data(date_times)
print(e.array)
print(e.datetime_array)
d = cf.Data(['2004-02-29', '2004-02-30', '2004-03-01'], calendar='360_day')
d.Units
print(d.array)
print(d.datetime_array)
e = cf.Data(['2004-02-29', '2004-03-01', '2004-03-02'], dt=True)
e.Units
print(e.datetime_array)
f = cf.Data(['2004-02-29', '2004-03-01', '2004-03-02'])
print(f.array)
f.Units
try:
print(f.datetime_array)
except:
pass
else:
raise Exception("This should have failed!")
q, t = cf.read('file.nc')
t
t2 = t.squeeze()
t2
print(t2.dimension_coordinates)
t3 = t2.insert_dimension(axis='domainaxis3', position=1)
t3
t3.transpose([2, 0, 1])
t4 = t.transpose([2, 0, 1], constructs=True)
print("\n**Subspacing by index**\n")
q, t = cf.read('file.nc')
print(q)
new = q[::-1, 0]
print(new)
q
t[:, :, 1]
t[:, 0]
t[..., 6:3:-1, 3:6]
t[0, [2, 3, 9], [4, 8]]
t[0, :, -2]
q
q.cyclic()
q.constructs.domain_axis_identity('domainaxis1')
print(q[:, -2:3])
print(q[:, 3:-2:-1])
t.data[0, [2, 3, 9], [4, 8]]
print("\n**Assignment by index**\n")
q, t = cf.read('file.nc')
t[:, 0, 0] = -1
t[:, :, 1] = -2
t[..., 6:3:-1, 3:6] = -3
print(t.array)
import numpy
t[..., 6:3:-1, 3:6] = numpy.arange(9).reshape(3, 3)
t[0, [2, 9], [4, 8]] = cf.Data([[-4, -5]])
t[0, [4, 7], 0] = [[-10], [-11]]
print(t.array)
print(t[0, 0, -1].array)
t[0, -1, -1] /= -10
print(t[0, 0, -1].array)
t.data[0, 0, -1] = -99
print(t[0, 0, -1].array)
t[0, :, -2] = cf.masked
print(t.array)
t[0, 4, -2] = 99
print(t[0, 4, -2].array)
t.hardmask = False
t[0, 4, -2] = 99
print(t[0, 4, -2].array)
q, t = cf.read('file.nc')
t0 = t.copy()
u = t.squeeze(0)
u.transpose(inplace=True)
u.flip(inplace=True)
t[...] = u
t.allclose(t0)
t[:, :, 1:3] = u[2]
print(t[:, :, 1:3].array)
print(u[2].array)
t[:, :, 1:3] = u[2]
print(t[:, :, 1:3].array)
print("\n**Units**\n")
q, t = cf.read('file.nc')
t.units
t.Units
t.units = 'degreesC'
t.units
t.Units
t.Units += 273.15
t.units
t.Units
t.data
t.Units = cf.Units('degreesC')
t.data
t.units = 'Kelvin'
t.data
t.data
t[0, 0, 0] = cf.Data(1)
t.data
t[0, 0, 0] = cf.Data(1, 'degreesC')
t.data
air_temp = cf.read('air_temperature.nc')[0]
time = air_temp.coordinate('time')
time.units
time.calendar
time.Units
print("\n**Filtering metadata constructs**\n")
q, t = cf.read('file.nc')
print(t.constructs.filter_by_type('dimension_coordinate'))
print(t.constructs.filter_by_type('cell_method', 'field_ancillary'))
print(t.constructs.filter_by_property(
standard_name='air_temperature standard_error'))
print(t.constructs.filter_by_property(
standard_name='air_temperature standard_error',
units='K'))
print(t.constructs.filter_by_property(
'or',
standard_name='air_temperature standard_error',
units='m'))
print(t.constructs.filter_by_axis('and', 'domainaxis1'))
print(t.constructs.filter_by_measure('area'))
print(t.constructs.filter_by_method('maximum'))
print(t.constructs.filter_by_type('auxiliary_coordinate').filter_by_axis('and', 'domainaxis2'))
c = t.constructs.filter_by_type('dimension_coordinate')
d = c.filter_by_property(units='degrees')
print(d)
print(t)
print(t.constructs.filter_by_identity('X'))
print(t.constructs.filter_by_identity('latitude'))
print(t.constructs.filter_by_identity('long_name=Grid latitude name'))
print(t.constructs.filter_by_identity('measure:area'))
print(t.constructs.filter_by_identity('ncvar%b'))
print(t.constructs.filter_by_identity('latitude'))
print(t.constructs('latitude'))
print(t.constructs.filter_by_key('domainancillary2'))
print(t.constructs.filter_by_key('cellmethod1'))
print(t.constructs.filter_by_key('auxiliarycoordinate2', 'cellmeasure0'))
c = t.constructs('radiation_wavelength')
c
print(c)
len(c)
c = t.constructs.filter_by_type('auxiliary_coordinate')
c
c.inverse_filter()
print(t.constructs.filter_by_type('cell_measure'))
print(t.cell_measures)
print("\n**Metadata construct access**\n")
t.construct('latitude')
t.construct('latitude', key=True)
key = t.construct_key('latitude')
t.get_construct(key)
t.constructs('latitude').value()
c = t.constructs.get(key)
t.constructs[key]
t.auxiliary_coordinate('latitude')
t.auxiliary_coordinate('latitude', key=True)
try:
t.construct('measure:volume')
except:
pass
else:
raise Exception("This should have failed!")
t.construct('measure:volume', False)
c = t.constructs.filter_by_measure('volume')
len(c)
try:
c.value()
except:
pass
else:
raise Exception("This should have failed!")
c.value(default='No construct')
try:
c.value(default=KeyError('My message'))
except:
pass
else:
raise Exception("This should have failed!")
d = t.constructs('units=degrees')
len(d)
try:
d.value()
except:
pass
else:
raise Exception("This should have failed!")
print(d.value(default=None))
lon = q.construct('longitude')
lon
lon.set_property('long_name', 'Longitude')
lon.properties()
area = t.constructs.filter_by_property(units='km2').value()
area
area.identity()
area.identities()
lon = q.constructs('longitude').value()
lon
lon.data
lon.data[2]
lon.data[2] = 133.33
print(lon.array)
lon.data[2] = 112.5
key = t.construct_key('latitude')
key
t.get_data_axes(key)
t.constructs.data_axes()
print("\n**Time**\n")
time = q.construct('time')
time
time.get_property('units')
time.get_property('calendar', default='standard')
print(time.array)
print(time.datetime_array)
cm = cf.TimeDuration(1, 'calendar_month', day=16, hour=12)
cm
cf.dt(2000, 2, 1) + cm
cf.Data([1, 2, 3], 'days since 2000-02-01') + cm
cm.interval(cf.dt(2000, 2, 1))
cm.bounds(cf.dt(2000, 2, 1))
print("\n**Domain**\n")
domain = t.domain
domain
print(domain)
description = domain.dump(display=False)
domain_latitude = t.domain.constructs('latitude').value()
field_latitude = t.constructs('latitude').value()
domain_latitude.set_property('test', 'set by domain')
print(field_latitude.get_property('test'))
field_latitude.set_property('test', 'set by field')
print(domain_latitude.get_property('test'))
domain_latitude.del_property('test')
field_latitude.has_property('test')
print("\n**Metadata construct types**\n")
print(q.domain_axes)
d = q.domain_axes.get('domainaxis1')
d
d.get_size()
print(t.coordinates)
lon = t.constructs('grid_longitude').value()
bounds = lon.bounds
bounds
bounds.data
print(bounds.array)
bounds.inherited_properties()
bounds.properties()
a = t.constructs.get('domainancillary0')
print(a.array)
bounds = a.bounds
bounds
print(bounds.array)
crs = t.constructs('standard_name:atmosphere_hybrid_height_coordinate').value()
crs
crs.dump()
crs.coordinates()
crs.datum
crs.datum.parameters()
crs.coordinate_conversion
crs.coordinate_conversion.parameters()
crs.coordinate_conversion.domain_ancillaries()
print(t.cell_methods)
t.cell_methods.ordered()
cm = t.constructs('method:mean').value()
cm
cm.get_axes()
cm.get_method()
cm.qualifiers()
cm.get_qualifier('where')
a = t.get_construct('fieldancillary0')
a
a.properties()
a.data
print("\n**Cyclic domain axes**\n")
print(q.array[0])
print(q.roll('X', shift=1).array[0])
qr = q.roll('X', shift=-3)
print(qr.array[0])
print(q.dimension_coordinate('X').array)
print(qr.dimension_coordinate('X').array)
print(q.anchor('X', -150))
print(q.anchor('X', -750))
print("\n**Subspacing by metadata**\n")
print(q)
print(q.construct('X').array)
q2 = q.subspace(X=112.5)
print(q2)
print(q.construct('latitude').array)
print(q.subspace(X=112.5, latitude=cf.gt(-60)))
c = cf.eq(-45) | cf.ge(20)
c
print(q.subspace(latitude=c))
print(q.subspace(X=[1, 2, 4], Y=slice(None, None, -1)))
print(q.subspace(X=cf.wi(-100, 200)))
print (q.subspace(X=slice(-2, 4)))
a = cf.read('timeseries.nc')[0]
print (a)
print(a.coordinate('T').array[0:9])
print(a.coordinate('T').datetime_array[0:9])
print(a.subspace(T=410.5))
print(a.subspace(T=cf.dt('1960-04-16')))
print(a.subspace(T=cf.wi(cf.dt('1962-11-01'), cf.dt('1967-03-17 07:30'))))
print(q.array)
q2 = q.subspace('compress', X=[1, 2, 4, 6])
print(q2)
print(q2.array)
q2 = q.subspace('envelope', X=[1, 2, 4, 6])
print(q2)
print(q2.array)
q2 = q.subspace('full', X=[1, 2, 4, 6])
print(q2)
print(q2.array)
print(t)
print(t.construct('latitude').array)
t2 = t.subspace(latitude=cf.wi(51, 53))
print(t2.array)
print("\n**Sorting and selecting from field lists**\n")
fl = cf.read('file.nc')
fl
fl.sort()
fl
fl.sort(key=lambda f: f.units)
fl
fl = cf.read('*.nc')
fl
fl.select_by_identity('precipitation_flux')
import re
fl.select_by_identity(re.compile('.*potential.*'))
fl.select_by_identity('relative_humidity')
fl('air_temperature')
fl.select('air_temperature')
print(t)
t.match_by_identity('air_temperature')
t.match_by_rank(4)
t.match_by_units('degC', exact=False)
t.match_by_construct(longitude=cf.wi(-10, 10))
t.match('specific_humidity')
t.match('specific_humidity', 'air_temperature')
print("\n**Encapsulating conditions**\n")
c = cf.Query('lt', 3)
c
c.evaluate(2)
c == 2
c != 2
c.evaluate(3)
c == cf.Data([1, 2, 3])
c == numpy.array([1, 2, 3])
ge3 = cf.Query('ge', 3)
lt5 = cf.Query('lt', 5)
c = ge3 & lt5
c
c == 2
c != 2
c = ge3 | lt5
c
c == 2
c &= cf.Query('set', [1, 3, 5])
c
c == 2
c == 3
upper_bounds_ge_minus4 = cf.Query('ge', -4, attr='upper_bounds')
X = t.dimension_coordinate('X')
X
print(X.bounds.array)
print((upper_bounds_ge_minus4 == X).array)
cf.ge(3)
cf.ge(cf.dt('2000-3-23'))
cf.year(1999)
cf.jja()
cf.contains(4)
cf.cellsize(cf.lt(10, 'degrees'))
print("\n**Assignment by condition**\n")
t = cf.read('file.nc')[1]
print(t.array)
u = t.where(cf.lt(273.15), x=cf.masked)
print(u.array)
u = t.where(cf.lt(273.15), x=0, y=1)
print(u.array)
print(t.where(u, x=-t, y=-99).array)
print(t.where(cf.gt(0.5), x=cf.masked, construct='grid_latitude').array)
print("\n**Field creation**\n")
print("\n**Stage 1:** The field construct is created without metadata\n")
print("\n**Stage 2:** Metadata constructs are created independently.\n")
print("\n**Stage 3:** The metadata constructs are inserted into the field\n")
p = cf.Field(properties={'standard_name': 'precipitation_flux'})
p
dc = cf.DimensionCoordinate(properties={'long_name': 'Longitude'},
data=cf.Data([0, 1, 2.]))
dc
fa = cf.FieldAncillary(
properties={'standard_name': 'precipitation_flux status_flag'},
data=cf.Data(numpy.array([0, 0, 2], dtype='int8')))
fa
p = cf.Field()
p
p.set_property('standard_name', 'precipitation_flux')
p
dc = cf.DimensionCoordinate()
dc
dc.set_property('long_name', 'Longitude')
dc.set_data(cf.Data([1, 2, 3.]))
dc
fa = cf.FieldAncillary(
data=cf.Data(numpy.array([0, 0, 2], dtype='int8')))
fa
fa.set_property('standard_name', 'precipitation_flux status_flag')
fa
longitude_axis = p.set_construct(cf.DomainAxis(3))
longitude_axis
key = p.set_construct(dc, axes=longitude_axis)
key
cm = cf.CellMethod(axes=longitude_axis, method='minimum')
p.set_construct(cm)
import numpy
import cf
# Initialise the field construct with properties
Q = cf.Field(properties={'project': 'research',
'standard_name': 'specific_humidity',
'units': '1'})
# Create the domain axis constructs
domain_axisT = cf.DomainAxis(1)
domain_axisY = cf.DomainAxis(5)
domain_axisX = cf.DomainAxis(8)
# Insert the domain axis constructs into the field. The
# set_construct method returns the domain axis construct key that
# will be used later to specify which domain axis corresponds to
# which dimension coordinate construct.
axisT = Q.set_construct(domain_axisT)
axisY = Q.set_construct(domain_axisY)
axisX = Q.set_construct(domain_axisX)
# Create and insert the field construct data
data = cf.Data(numpy.arange(40.).reshape(5, 8))
Q.set_data(data)
# Create the cell method constructs
cell_method1 = cf.CellMethod(axes='area', method='mean')
cell_method2 = cf.CellMethod()
cell_method2.set_axes(axisT)
cell_method2.set_method('maximum')
# Insert the cell method constructs into the field in the same
# order that their methods were applied to the data
Q.set_construct(cell_method1)
Q.set_construct(cell_method2)
# Create a "time" dimension coordinate construct, with coordinate
# bounds
dimT = cf.DimensionCoordinate(
properties={'standard_name': 'time',
'units': 'days since 2018-12-01'},
data=cf.Data([15.5]),
bounds=cf.Bounds(data=cf.Data([[0,31.]])))
# Create a "longitude" dimension coordinate construct, without
# coordinate bounds
dimX = cf.DimensionCoordinate(data=cf.Data(numpy.arange(8.)))
dimX.set_properties({'standard_name': 'longitude',
'units': 'degrees_east'})
# Create a "longitude" dimension coordinate construct
dimY = cf.DimensionCoordinate(properties={'standard_name': 'latitude',
'units' : 'degrees_north'})
array = numpy.arange(5.)
dimY.set_data(cf.Data(array))
# Create and insert the latitude coordinate bounds
bounds_array = numpy.empty((5, 2))
bounds_array[:, 0] = array - 0.5
bounds_array[:, 1] = array + 0.5
bounds = cf.Bounds(data=cf.Data(bounds_array))
dimY.set_bounds(bounds)
# Insert the dimension coordinate constructs into the field,
# specifying to which domain axis each one corresponds
Q.set_construct(dimT)
Q.set_construct(dimY)
Q.set_construct(dimX)
Q.dump()
import numpy
import cf
# Initialize the field construct
tas = cf.Field(
properties={'project': 'research',
'standard_name': 'air_temperature',
'units': 'K'})
# Create and set domain axis constructs
axis_T = tas.set_construct(cf.DomainAxis(1))
axis_Z = tas.set_construct(cf.DomainAxis(1))
axis_Y = tas.set_construct(cf.DomainAxis(10))
axis_X = tas.set_construct(cf.DomainAxis(9))
# Set the field construct data
tas.set_data(cf.Data(numpy.arange(90.).reshape(10, 9)))
# Create and set the cell method constructs
cell_method1 = cf.CellMethod(
axes=[axis_Y, axis_X],
method='mean',
qualifiers={'where': 'land',
'interval': [cf.Data(0.1, units='degrees')]})
cell_method2 = cf.CellMethod(axes=axis_T, method='maximum')
tas.set_construct(cell_method1)
tas.set_construct(cell_method2)
# Create and set the field ancillary constructs
field_ancillary = cf.FieldAncillary(
properties={'standard_name': 'air_temperature standard_error',
'units': 'K'},
data=cf.Data(numpy.arange(90.).reshape(10, 9)))
tas.set_construct(field_ancillary)
# Create and set the dimension coordinate constructs
dimension_coordinate_T = cf.DimensionCoordinate(
properties={'standard_name': 'time',
'units': 'days since 2018-12-01'},
data=cf.Data([15.5]),
bounds=cf.Bounds(data=cf.Data([[0., 31]])))
dimension_coordinate_Z = cf.DimensionCoordinate(
properties={'computed_standard_name': 'altitude',
'standard_name': 'atmosphere_hybrid_height_coordinate'},
data = cf.Data([1.5]),
bounds=cf.Bounds(data=cf.Data([[1.0, 2.0]])))
dimension_coordinate_Y = cf.DimensionCoordinate(
properties={'standard_name': 'grid_latitude',
'units': 'degrees'},
data=cf.Data(numpy.arange(10.)),
bounds=cf.Bounds(data=cf.Data(numpy.arange(20).reshape(10, 2))))
dimension_coordinate_X = cf.DimensionCoordinate(
properties={'standard_name': 'grid_longitude',
'units': 'degrees'},
data=cf.Data(numpy.arange(9.)),
bounds=cf.Bounds(data=cf.Data(numpy.arange(18).reshape(9, 2))))
dim_T = tas.set_construct(dimension_coordinate_T, axes=axis_T)
dim_Z = tas.set_construct(dimension_coordinate_Z, axes=axis_Z)
dim_Y = tas.set_construct(dimension_coordinate_Y)
dim_X = tas.set_construct(dimension_coordinate_X)
# Create and set the auxiliary coordinate constructs
auxiliary_coordinate_lat = cf.AuxiliaryCoordinate(
properties={'standard_name': 'latitude',
'units': 'degrees_north'},
data=cf.Data(numpy.arange(90.).reshape(10, 9)))
auxiliary_coordinate_lon = cf.AuxiliaryCoordinate(
properties={'standard_name': 'longitude',
'units': 'degrees_east'},
data=cf.Data(numpy.arange(90.).reshape(9, 10)))
array = numpy.ma.array(list('abcdefghij'))
array[0] = numpy.ma.masked
auxiliary_coordinate_name = cf.AuxiliaryCoordinate(
properties={'long_name': 'Grid latitude name'},
data=cf.Data(array))
aux_LAT = tas.set_construct(auxiliary_coordinate_lat)
aux_LON = tas.set_construct(auxiliary_coordinate_lon)
aux_NAME = tas.set_construct(auxiliary_coordinate_name)
# Create and set domain ancillary constructs
domain_ancillary_a = cf.DomainAncillary(
properties={'units': 'm'},
data=cf.Data([10.]),
bounds=cf.Bounds(data=cf.Data([[5., 15.]])))
domain_ancillary_b = cf.DomainAncillary(
properties={'units': '1'},
data=cf.Data([20.]),
bounds=cf.Bounds(data=cf.Data([[14, 26.]])))
domain_ancillary_orog = cf.DomainAncillary(
properties={'standard_name': 'surface_altitude',
'units': 'm'},
data=cf.Data(numpy.arange(90.).reshape(10, 9)))
domain_anc_A = tas.set_construct(domain_ancillary_a, axes=axis_Z)
domain_anc_B = tas.set_construct(domain_ancillary_b, axes=axis_Z)
domain_anc_OROG = tas.set_construct(domain_ancillary_orog)
# Create the datum for the coordinate reference constructs
datum = cf.Datum(parameters={'earth_radius': 6371007.})
# Create the coordinate conversion for the horizontal coordinate
# reference construct
coordinate_conversion_h = cf.CoordinateConversion(
parameters={'grid_mapping_name': 'rotated_latitude_longitude',
'grid_north_pole_latitude': 38.0,
'grid_north_pole_longitude': 190.0})
# Create the coordinate conversion for the vertical coordinate
# reference construct
coordinate_conversion_v = cf.CoordinateConversion(
parameters={'standard_name': 'atmosphere_hybrid_height_coordinate',
'computed_standard_name': 'altitude'},
domain_ancillaries={'a': domain_anc_A,
'b': domain_anc_B,
'orog': domain_anc_OROG})
# Create the horizontal coordinate reference construct
horizontal_crs = cf.CoordinateReference(
datum=datum,
coordinate_conversion=coordinate_conversion_h,
coordinates=[dim_X,
dim_Y,
aux_LAT,
aux_LON])
# Create the vertical coordinate reference construct
vertical_crs = cf.CoordinateReference(
datum=datum,
coordinate_conversion=coordinate_conversion_v,
coordinates=[dim_Z])
# Set the coordinate reference constructs
tas.set_construct(horizontal_crs)
tas.set_construct(vertical_crs)
# Create and set the cell measure constructs
cell_measure = cf.CellMeasure(measure='area',
properties={'units': 'km2'},
data=cf.Data(numpy.arange(90.).reshape(9, 10)))
tas.set_construct(cell_measure)
print(tas)
import netCDF4
nc = netCDF4.Dataset('file.nc', 'r')
v = nc.variables['ta']
netcdf_array = cf.NetCDFArray(filename='file.nc', ncvar='ta',
dtype=v.dtype, ndim=v.ndim,
shape=v.shape, size=v.size)
data_disk = cf.Data(netcdf_array)
numpy_array = v[...]
data_memory = cf.Data(numpy_array)
data_disk.equals(data_memory)
key = tas.construct_key('surface_altitude')
orog = tas.convert(key)
print(orog)
orog1 = tas.convert(key, full_domain=False)
print(orog1)
cf.write(tas, 'tas.nc')
f = cf.read('tas.nc')
f
fields = cf.read('tas.nc', extra='domain_ancillary')
fields
orog_from_file = fields[3]
print(orog_from_file)
print("\n**Copying**\n")
u = t.copy()
u.data[0, 0, 0] = -1e30
u.data[0, 0, 0]
t.data[0, 0, 0]
u.del_construct('grid_latitude')
u.constructs('grid_latitude')
t.constructs('grid_latitude')
import copy
u = copy.deepcopy(t)
orog = t.constructs('surface_altitude').value().copy()
print("\n**Equality**\n")
t.equals(t)
t.equals(t.copy())
t.equals(t[...])
t.equals(q)
t.equals(q, verbose=True)
cf.ATOL()
cf.RTOL()
original = cf.RTOL(0.00001)
cf.RTOL()
cf.RTOL(original)
cf.RTOL()
orog = t.constructs('surface_altitude').value()
orog.equals(orog.copy())
print("\n**NetCDF interface**\n")
print(t.constructs.filter_by_ncvar('b'))
t.constructs('ncvar%x').value()
t.constructs('ncdim%x')
q.nc_get_variable()
q.nc_global_attributes()
q.nc_set_variable('humidity')
q.nc_get_variable()
q.constructs('latitude').value().nc_get_variable()
print("\n**Writing to disk**\n")
print(q)
cf.write(q, 'q_file.nc')
x
cf.write(x, 'new_file.nc')
f = cf.read('q_file.nc')[0]
q.equals(f)
f.set_property('model', 'model_A')
cf.write(f, 'f_file.nc', global_attributes='model')
f.nc_global_attributes()
f.nc_set_global_attribute('model')
f.nc_global_attributes()
cf.write(f, 'f_file.nc')
f.set_property('information', 'variable information')
f.properties()
f.nc_set_global_attribute('information', 'global information')
f.nc_global_attributes()
cf.write(f, 'f_file.nc')
cf.write(f, 'f_file.nc', file_descriptors={'history': 'created in 2019'})
f_file = cf.read('f_file.nc')[0]
f_file.nc_global_attributes()
f_file.properties()
f_file.nc_global_attributes()
f_file.set_property('Conventions', 'UGRID1.0')
cf.write(f, 'f_file.nc', Conventions='UGRID1.0')
print(q)
key = q.construct_key('time')
axes = q.get_data_axes(key)
axes
q2 = q.insert_dimension(axis=axes[0])
q2
cf.write(q2, 'q2_file.nc')
print("\n**External variables**\n")
u = cf.read('parent.nc')[0]
print(u)
area = u.constructs('measure:area').value()
area
area.nc_get_external()
area.nc_get_variable()
area.properties()
area.has_data()
g = cf.read('parent.nc', external='external.nc')[0]
print(g)
area = g.construct('measure:area')
area
area.nc_get_external()
area.nc_get_variable()
area.properties()
area.data
area.nc_set_external(True)
cf.write(g, 'new_parent.nc')
cf.write(g, 'new_parent.nc', external='new_external.nc')
print("\n**Statistical collapses**\n")
a = cf.read('timeseries.nc')[0]
print(a)
b = a.collapse('minimum')
print(b)
print(b.array)
b = a.collapse('maximum', axes='T')
b = a.collapse('T: maximum')
print(b)
print(b.array)
b = a.collapse('maximum', axes=['X', 'Y'])
b = a.collapse('X: Y: maximum')
print(b)
b = a.collapse('area: maximum')
print(b)
b = a.collapse('T: mean', weights='T')
print(b)
print (b.array)
w = a.weights(weights='T')
print(w)
print(w.array)
b = a.collapse('T: Y: mean', weights='Y')
print(b)
print (b.array)
b = a.collapse('area: mean', weights='area')
print(b)
b = a.collapse('area: mean', weights='area').collapse('T: maximum')
print(b)
print(b.array)
b = a.collapse('area: mean T: maximum', weights='area')
print(b.array)
y = cf.Y(month=12)
y
b = a.collapse('T: maximum', group=y)
print(b)
b = a.collapse('T: maximum', group=6)
print(b)
b = a.collapse('T: maximum', group=cf.djf())
print(b)
c = cf.seasons()
c
b = a.collapse('T: maximum', group=c)
print(b)
b = a.collapse('X: mean', group=cf.Data(180, 'degrees'))
print(b)
b = a.collapse('T: mean within years T: mean over years',
within_years=cf.seasons(), weights='T')
print(b)
print(b.coordinate('T').bounds.datetime_array)
b = a.collapse('T: minimum within years T: variance over years',
within_years=cf.seasons(), weights='T')
print(b)
print(b.coordinate('T').bounds.datetime_array)
b = a.collapse('T: mean within years T: mean over years', weights='T',
within_years=cf.seasons(), over_years=cf.Y(5))
print(b)
print(b.coordinate('T').bounds.datetime_array)
b = a.collapse('T: mean within years T: mean over years', weights='T',
within_years=cf.seasons(), over_years=cf.year(cf.wi(1963, 1968)))
print(b)
print(b.coordinate('T').bounds.datetime_array)
b = a.collapse('T: standard_deviation within years',
within_years=cf.seasons(), weights='T')
print(b)
c = b.collapse('T: maximum over years')
print(c)
print("\n**Regridding**\n")
a = cf.read('air_temperature.nc')[0]
b = cf.read('precipitation_flux.nc')[0]
print(a)
print(b)
c = a.regrids(b, 'conservative')
print(c)
import numpy
lat = cf.DimensionCoordinate(data=cf.Data(numpy.arange(-90, 92.5, 2.5), 'degrees_north'))
lon = cf.DimensionCoordinate(data=cf.Data(numpy.arange(0, 360, 5.0), 'degrees_east'))
c = a.regrids({'latitude': lat, 'longitude': lon}, 'bilinear')
time = cf.DimensionCoordinate()
time.standard_name='time'
time.set_data(cf.Data(numpy.arange(0.5, 60, 1),
units='days since 1860-01-01', calendar='360_day'))
time
c = a.regridc({'T': time}, axes='T', method='bilinear')
try:
c = a.regridc({'T': time}, axes='T', method='conservative')
except:
pass
else:
raise Exception("This should have failed!")
bounds = time.create_bounds()
time.set_bounds(bounds)
c = a.regridc({'T': time}, axes='T', method='conservative')
print(c)
v = cf.read('vertical.nc')[0]
print(v)
z_p = v.construct('Z')
print(z_p.array)
z_ln_p = z_p.log()
print(z_ln_p.array)
_ = v.replace_construct('Z', z_ln_p)
new_z_p = cf.DimensionCoordinate(data=cf.Data([800, 705, 632, 510, 320.], 'hPa'))
new_z_ln_p = new_z_p.log()
new_v = v.regridc({'Z': new_z_ln_p}, axes='Z', method='bilinear')
new_v.replace_construct('Z', new_z_p)
print(new_v)
print("\n**Mathematical operations**\n")
q, t = cf.read('file.nc')
t.data.stats()
x = t + t
x
x.min()
(t - 2).min()
(2 + t).min()
(t * list(range(9))).min()
(t + cf.Data(numpy.arange(20, 29), '0.1 K')).min()
u = t.copy()
u.transpose(inplace=True)
u.Units -= 273.15
u[0]
t + u[0]
t.identities()
u = t * cf.Data(10, 'ms-1')
u.identities()
q, t = cf.read('file.nc')
print(q.array)
print(-q.array)
print(abs(-q.array))
q, t = cf.read('file.nc')
print(q.array)
print((q == q).array)
print((q < 0.05).array)
print((q >= q[0]).array)
q.identities()
r = q > q.mean()
r.identities()
t.min()
u = t.copy()
new_data = t.data + t.data
u.set_data(new_data)
u
u.min()
u[...] = new_data
u.min()
t.data -= t.data
t.min()
q, t = cf.read('file.nc')
lat = q.dimension_coordinate('latitude')
lat.data
sin_lat = lat.sin()
sin_lat.data
q
q.log()
q.exp()
t
t.log(base=10)
try:
t.exp()
except:
pass
else:
raise Exception("This should have failed!")
print(q)
q.iscyclic('X')
r = q.convolution_filter([0.1, 0.15, 0.5, 0.15, 0.1], axis='X')
print(r)
print(q.dimension_coordinate('X').bounds.array)
print(r.dimension_coordinate('X').bounds.array)
from scipy.signal import windows
exponential_weights = windows.exponential(3)
print(exponential_weights)
r = q.convolution_filter(exponential_weights, axis='Y')
print(r.array)
r = q.derivative('X')
r = q.derivative('Y', one_sided_at_boundary=True)
u, v = cf.read('wind_components.nc')
zeta = cf.relative_vorticity(u, v)
print(zeta)
print(zeta.array.round(8))
a = cf.read('timeseries.nc')[0]
print(a)
b = a.cumsum('T')
print(b)
print(a.coordinate('T').bounds[-1].dtarray)
print(b.coordinate('T').bounds[-1].dtarray)
print("\n**Aggregation**\n")
a = cf.read('air_temperature.nc')[0]
a
a_parts = [a[0, : , 0:30], a[0, :, 30:], a[1, :, 0:30], a[1, :, 30:]]
a_parts
for i, f in enumerate(a_parts):
cf.write(f, str(i)+'_air_temperature.nc')
x = cf.read('[0-3]_air_temperature.nc')
y = cf.read('[0-3]_air_temperature.nc', aggregate=False)
z = cf.aggregate(y)
x.equals(z)
print("\n**Compression**\n")
h = cf.read('contiguous.nc')[0]
print(h)
print(h.array)
h.data.get_compression_type()
print(h.data.compressed_array)
count_variable = h.data.get_count()
count_variable
print(count_variable.array)
station2 = h[1]
station2
print(station2.array)
h.data.get_compression_type()
h.data[1, 2] = -9
print(h.array)
h.data.get_compression_type()
import numpy
import cf
# Define the ragged array values
ragged_array = cf.Data([280, 281, 279, 278, 279.5])
# Define the count array values
count_array = [1, 4]
# Create the count variable
count_variable = cf.Count(data=cf.Data(count_array))
count_variable.set_property('long_name', 'number of obs for this timeseries')
# Create the contiguous ragged array object, specifying the
# uncompressed shape
array = cf.RaggedContiguousArray(
compressed_array=ragged_array,
shape=(2, 4), size=8, ndim=2,
count_variable=count_variable)
# Create the field construct with the domain axes and the ragged
# array
T = cf.Field()
T.set_properties({'standard_name': 'air_temperature',
'units': 'K',
'featureType': 'timeSeries'})
# Create the domain axis constructs for the uncompressed array
X = T.set_construct(cf.DomainAxis(4))
Y = T.set_construct(cf.DomainAxis(2))
# Set the data for the field
T.set_data(cf.Data(array))
T
print(T.array)
T.data.get_compression_type()
print(T.data.compressed_array)
count_variable = T.data.get_count()
count_variable
print(count_variable.array)
cf.write(T, 'T_contiguous.nc')
p = cf.read('gathered.nc')[0]
print(p)
print(p.array)
p.data.get_compression_type()
print(p.data.compressed_array)
list_variable = p.data.get_list()
list_variable
print(list_variable.array)
p[0]
p[1, :, 3:5]
p.data.get_compression_type()
p.data[1] = -9
p.data.get_compression_type()
import numpy
import cf
# Define the gathered values
gathered_array = cf.Data([[2, 1, 3], [4, 0, 5]])
# Define the list array values
list_array = [1, 4, 5]
# Create the list variable
list_variable = cf.List(data=cf.Data(list_array))
# Create the gathered array object, specifying the uncompressed
# shape
array = cf.GatheredArray(
compressed_array=gathered_array,
compressed_dimension=1,
shape=(2, 3, 2), size=12, ndim=3,
list_variable=list_variable)
# Create the field construct with the domain axes and the gathered
# array
P = cf.Field(properties={'standard_name': 'precipitation_flux',
'units': 'kg m-2 s-1'})
# Create the domain axis constructs for the uncompressed array
T = P.set_construct(cf.DomainAxis(2))
Y = P.set_construct(cf.DomainAxis(3))
X = P.set_construct(cf.DomainAxis(2))
# Set the data for the field
P.set_data(cf.Data(array), axes=[T, Y, X])
P
print(P.data.array)
P.data.get_compression_type()
print(P.data.compressed_array)
list_variable = P.data.get_list()
list_variable
print(list_variable.array)
cf.write(P, 'P_gathered.nc')
print("\n**PP and UM fields files**\n")
pp = cf.read('umfile.pp')
pp
print(pp[0])
cf.write(pp, 'umfile1.nc')
type(cf.read_write.um.umread.stash2standard_name)
cf.read_write.um.umread.stash2standard_name[(1, 4)]
cf.read_write.um.umread.stash2standard_name[(1, 2)]
cf.read_write.um.umread.stash2standard_name[(1, 7)]
(1, 999) in cf.read_write.um.umread.stash2standard_name
with open('new_STASH.txt', 'w') as new:
new.write('1!999!My STASH code!1!!!ultraviolet_index!!')
_ = cf.load_stash2standard_name('new_STASH.txt', merge=True)
cf.read_write.um.umread.stash2standard_name[(1, 999)]
| 27.400794 | 95 | 0.656799 |
4a27790d060559740bc7576e9deaba8d430efab6 | 1,083 | py | Python | api/logger.py | IFRCGo/go-api | 6acd84df479f0cf46553029ababa1e7753f86550 | [
"MIT"
] | 11 | 2018-06-11T06:05:12.000Z | 2022-03-25T09:31:44.000Z | api/logger.py | IFRCGo/go-api | 6acd84df479f0cf46553029ababa1e7753f86550 | [
"MIT"
] | 498 | 2017-11-07T21:20:13.000Z | 2022-03-31T14:37:18.000Z | api/logger.py | IFRCGo/go-api | 6acd84df479f0cf46553029ababa1e7753f86550 | [
"MIT"
] | 6 | 2018-04-11T13:29:50.000Z | 2020-07-16T16:52:11.000Z | import logging
import sys
import os
from azure_storage_logging.handlers import BlobStorageTimedRotatingFileHandler as storage
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(formatter)
logger = logging.getLogger('api')
logger.setLevel('DEBUG')
logger.addHandler(screen_handler)
if (os.environ.get('AZURE_STORAGE_ACCOUNT') is not None and
os.environ.get('AZURE_STORAGE_KEY') is not None):
handler = storage(account_name=os.environ.get('AZURE_STORAGE_ACCOUNT'),
account_key=os.environ.get('AZURE_STORAGE_KEY'),
filename='go.log',
when='M',
interval=90,
container='logs',
encoding='utf-8'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
else:
logger.warning('No Azure credentials found, falling back to local logs.')
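# Illustrative usage from other modules (the import path is an assumption based
# on this file living at api/logger.py, not something defined here):
# from api.logger import logger
# logger.info('Sent notification emails')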
| 36.1 | 89 | 0.636196 |
4a27797083dfc1ed62152cbbd9ac1da9b521bc15 | 562 | py | Python | Wrappers/Python/Testing/Functions/loadSBML.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/Testing/Functions/loadSBML.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | Wrappers/Python/Testing/Functions/loadSBML.py | gregmedlock/roadrunnerwork | 11f18f78ef3e381bc59c546a8d5e3ed46d8ab596 | [
"Apache-2.0"
] | null | null | null | import rrPython
import os
import csv
os.chdir('C:\\RoadRunner\\bin')
function = 'loadSBML'
file = open('C:\\RoadRunner\\Models\\feedback.xml','r').read()
#rrPython.loadSBML(file)
try:
sbml = rrPython.loadSBML(file)
if sbml is not False:
result = 'True'
else:
result = 'False'
except:
result = 'False'
PythonTestResults = open('C:\\RoadRunner\\PythonTestResults.csv','a')
writer = csv.writer(PythonTestResults)
writevar = function + '=' + result
writer.writerow([writevar])
PythonTestResults.close() | 23.416667 | 70 | 0.654804 |
4a27797eb2a704f0036582dbcb245ae770e1b82d | 291 | py | Python | lexrank/mappings/stopwords.py | Berndzz/lexrank | 07bdd1579c408cf73cc822da303734d0a70cf3f7 | [
"MIT"
] | 99 | 2018-11-01T08:05:48.000Z | 2022-03-09T17:45:07.000Z | lexrank/mappings/stopwords.py | Berndzz/lexrank | 07bdd1579c408cf73cc822da303734d0a70cf3f7 | [
"MIT"
] | 4 | 2020-02-27T14:16:25.000Z | 2022-02-16T14:38:49.000Z | lexrank/mappings/stopwords.py | Berndzz/lexrank | 07bdd1579c408cf73cc822da303734d0a70cf3f7 | [
"MIT"
] | 33 | 2018-12-19T05:08:34.000Z | 2022-02-09T17:29:52.000Z | import gzip
import json
from lexrank import settings
file = settings.ASSETS_ROOT / 'stopwords.json.gz'
with gzip.open(file, mode='rt', encoding='utf-8') as fp:
_STOPWORDS = json.load(fp)
STOPWORDS = {}
for lang, stopwords in _STOPWORDS.items():
STOPWORDS[lang] = set(stopwords)
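# Illustrative usage (the 'en' key is an assumption about the languages bundled
# in stopwords.json.gz):
# 'the' in STOPWORDS.get('en', set()) # True when an English stopword list is present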
| 19.4 | 56 | 0.714777 |
4a27798ba3251ce8d5186df588c15db1b2904d7d | 638 | py | Python | decorator/annotation.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | decorator/annotation.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | decorator/annotation.py | guwenbo/DesignPattern-Python | 3b7228595e9dd6ae5cb0df06b2b8a74ce2d774a9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Decorator pattern (annotation-based, i.e. implemented with Python's @decorator syntax)
"""
from functools import wraps
from datetime import datetime
import time
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
begin = datetime.now()
result = func(*args, **kwargs)
end = datetime.now()
print("function [%s] executes for %d seconds" % (func.__name__, (end - begin).seconds))
return result
return wrapper
@decorate
def func(*args, **kwargs):
print("Do something ...")
sec = 3
print("Sleeping for %d seconds" % sec)
time.sleep(sec)
if __name__ == '__main__':
func()
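# Expected console output when run directly (the timing line is approximate):
# Do something ...
# Sleeping for 3 seconds
# function [func] executes for 3 seconds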
| 18.228571 | 95 | 0.595611 |
4a277a1412d025076b481251f5f043cfe0fbc5be | 2,510 | py | Python | simpleblog/models.py | jchoude/django-simple-blog | 1081496ed0210691e27eab5c9ee3917ed08ebf7f | [
"BSD-2-Clause"
] | null | null | null | simpleblog/models.py | jchoude/django-simple-blog | 1081496ed0210691e27eab5c9ee3917ed08ebf7f | [
"BSD-2-Clause"
] | null | null | null | simpleblog/models.py | jchoude/django-simple-blog | 1081496ed0210691e27eab5c9ee3917ed08ebf7f | [
"BSD-2-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from .signals import save_comment
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=200, verbose_name=_("title"))
slug = models.SlugField()
bodytext = models.TextField(verbose_name=_("message"))
post_date = models.DateTimeField(
auto_now_add=True, verbose_name=_("post date"))
modified = models.DateTimeField(null=True, verbose_name=_("modified"))
posted_by = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name=_("posted by"))
allow_comments = models.BooleanField(
default=True, verbose_name=_("allow comments"))
comment_count = models.IntegerField(
blank=True, default=0, verbose_name=_('comment count'))
class Meta:
verbose_name = _('post')
verbose_name_plural = _('posts')
ordering = ['-post_date']
def __str__(self):
return self.title
def get_absolute_url(self):
kwargs = {
'slug': self.slug,
'year': '%04d' % self.post_date.year,
'month': '%02d' % self.post_date.month,
'day': '%02d' % self.post_date.day,
}
return reverse('blog_detail', kwargs=kwargs)
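# Note (added): reverse('blog_detail', ...) assumes the project URLconf defines
# a pattern named 'blog_detail' that accepts year/month/day/slug keyword arguments.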
@python_2_unicode_compatible
class Comment(models.Model):
post = models.ForeignKey(
Post, related_name='comments', verbose_name=_("post"))
bodytext = models.TextField(verbose_name=_("message"))
post_date = models.DateTimeField(
auto_now_add=True, verbose_name=_("post date"))
ip_address = models.GenericIPAddressField(
default='0.0.0.0', verbose_name=_("ip address"))
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True,
verbose_name=_("user"), related_name='comment_user')
user_name = models.CharField(
max_length=50, default='anonymous', verbose_name=_("user name"))
user_email = models.EmailField(blank=True, verbose_name=_("user email"))
def __str__(self):
return self.bodytext
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
ordering = ['post_date']
post_save.connect(save_comment, sender=Comment)
| 32.597403 | 76 | 0.679283 |
4a277a3496b5bed9ed845782fca7b51080c30a17 | 6,045 | py | Python | testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzGetApName.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzGetApName.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzGetApName.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2018 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_WIFIHAL_5GHzGetApName</name>
<primitive_test_id/>
<primitive_test_name>WIFIHAL_GetOrSetParamStringValue</primitive_test_name>
<primitive_test_version>8</primitive_test_version>
<status>FREE</status>
<synopsis>To get the access point name for access point 1</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>Emulator</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIHAL_171</test_case_id>
<test_objective>To get the access point name for access point 1</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband, Emulator, RPI</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_getApName()</api_or_interface_used>
<input_parameters>methodName : getApName
ApIndex : 1</input_parameters>
<automation_approch>1. Load wifihal module
2. Using WIFIHAL_GetOrSetParamStringValue invoke wifi_getApName()
3. The API returns a string "ath" followed by the ApIndex
4. Depending upon the values return SUCCESS or FAILURE
5. Unload wifihal module</automation_approch>
<except_output>Should return ath1</except_output>
<priority>High</priority>
<test_stub_interface>WIFIHAL </test_stub_interface>
<test_script>TS_WIFIHAL_5GHzGetApName</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from wifiUtility import *;
from tdkbVariables import *;
radio = "5G"
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
sysobj = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIHAL_5GHzGetApName');
sysobj.configureTestCase(ip,port,'TS_WIFIHAL_5GHzGetApName');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
sysloadmodulestatus =sysobj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %sysloadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus ;
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in sysloadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
tdkTestObjTemp, idx = getIndex(obj, radio);
## Check if a invalid index is returned
if idx == -1:
print "Failed to get radio index for radio %s\n" %radio;
tdkTestObjTemp.setResultStatus("FAILURE");
else:
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
expectedresult="SUCCESS";
accesspointname = "sh %s/tdk_utility.sh parseConfigFile AP_IF_NAME_5G" %TDK_PATH;
print "query:%s" %accesspointname
tdkTestObj.addParameter("command", accesspointname);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
accesspointname= tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult and accesspointname != "":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the 5GHZ Access Point name from properties file";
print "ACTUAL RESULT 1:Access point name: %s" %accesspointname;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
getMethod = "getApName"
apIndex = idx
primitive = 'WIFIHAL_GetOrSetParamStringValue'
#Calling the method from wifiUtility to execute test case and set result status for the test.
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, "0", getMethod)
apName = details.split(":")[1].strip()
if expectedresult in actualresult and accesspointname == apName:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Get the 5GHZ Access Point name";
print "ACTUAL RESULT 2: %s" %apName;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Get the 5GHZ Access Point name";
print "ACTUAL RESULT 2: %s" %apName;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "FAILURE: Failed to get the value of 5GHZ access point name from tdk_platform.properties file"
obj.unloadModule("wifihal");
sysobj.unloadModule("sysutil");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
| 39.509804 | 157 | 0.722746 |
4a277b1ddb3f786b5b077f938f37f6835c2cab55 | 1,168 | py | Python | test2.py | ragulkesavan/Hand-Cricket | 216822fa3a0d361bdd41d55b104b63664510d753 | [
"MIT"
] | null | null | null | test2.py | ragulkesavan/Hand-Cricket | 216822fa3a0d361bdd41d55b104b63664510d753 | [
"MIT"
] | null | null | null | test2.py | ragulkesavan/Hand-Cricket | 216822fa3a0d361bdd41d55b104b63664510d753 | [
"MIT"
] | null | null | null | import cv2
# Camera 0 is the integrated web cam on my netbook
camera_port = 0
# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 30
# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)
# Captures a single image from the camera and returns it in PIL format
def get_image():
# read is the easiest way to get a full image out of a VideoCapture object.
retval, im = camera.read()
return im
# Ramp the camera - these frames will be discarded and are only used to allow v4l2
# to adjust light levels, if necessary
for i in range(ramp_frames):
temp = get_image()
print("Taking image...")
# Take the actual image we want to keep
camera_capture = get_image()
file = "test_image.png"
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide. Convenient!
cv2.imwrite(file, camera_capture)
# You'll want to release the camera, otherwise you won't be able to create a new
# capture object until your script exits
del camera
4a277b503c91c73bd2e8f935c9a849cffc1db871 | 1,327 | py | Python | lib/ult/config.py | DirtyHarryLYL/DJ-RN | 7f362693f433d49e2fb5c0fc6216218d464c612b | [
"Apache-2.0"
] | 96 | 2020-03-07T09:23:56.000Z | 2022-01-05T08:31:59.000Z | lib/ult/config.py | DirtyHarryLYL/DJ-RN | 7f362693f433d49e2fb5c0fc6216218d464c612b | [
"Apache-2.0"
] | 71 | 2020-04-21T07:25:51.000Z | 2022-03-01T12:17:06.000Z | lib/ult/config.py | DirtyHarryLYL/DJ-RN | 7f362693f433d49e2fb5c0fc6216218d464c612b | [
"Apache-2.0"
] | 14 | 2020-04-20T08:53:10.000Z | 2022-02-27T05:24:23.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
from easydict import EasyDict as edict
__C = edict()
cfg = __C
__C.TRAIN = edict()
__C.TRAIN_MODULE = 1
__C.TRAIN_MODULE_UPDATE = 1
__C.TRAIN_INIT_WEIGHT = 3
__C.TRAIN_MODULE_CONTINUE = 2
__C.TRAIN.LEARNING_RATE = 0.0001
__C.TRAIN_DROP_OUT_BINARY = 0.8
__C.TRAIN.SNAPSHOT_ITERS = 100000
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.WEIGHT_DECAY = 0.0005
__C.TRAIN.GAMMA = 0.96
__C.TRAIN.STEPSIZE = 20000
__C.TRAIN.SNAPSHOT_KEPT = None
__C.TRAIN.DISPLAY = 10
__C.TRAIN.SUMMARY_INTERVAL = 200
__C.RESNET = edict()
__C.RESNET.MAX_POOL = False
__C.RESNET.FIXED_BLOCKS = 1
__C.LR_DECAY = edict()
__C.LR_DECAY.TYPE = 'none'
__C.LR_DECAY.STEPS = 5.0
__C.LR_DECAY.T_MUL = 2.0
__C.LR_DECAY.M_MUL = 1.0
__C.LANG_NOISE = 0
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
__C.RNG_SEED = 3
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'Data'))
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'Data', 'smplx_res'))
__C.EXP_DIR = 'default'
__C.USE_GPU_NMS = True
__C.POOLING_MODE = 'crop'
__C.POOLING_SIZE = 7
__C.ANCHOR_SCALES = [8,16,32]
__C.ANCHOR_RATIOS = [0.5,1,2]
__C.RPN_CHANNELS = 512
| 26.019608 | 71 | 0.750565 |
4a277b62376927e17c525ee41b8fd28efd110544 | 163 | py | Python | python/gen_randon_lst.py | torao-1892/testes | 0452283355533ee68de0e84ad7d48c1aed03571c | [
"MIT"
] | null | null | null | python/gen_randon_lst.py | torao-1892/testes | 0452283355533ee68de0e84ad7d48c1aed03571c | [
"MIT"
] | null | null | null | python/gen_randon_lst.py | torao-1892/testes | 0452283355533ee68de0e84ad7d48c1aed03571c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import random
num = 1
count = 0
while count < 1000:
num = random.randint(1, 100000)
print("BRI %03d" % num)
count = count + 1 | 14.818182 | 35 | 0.613497 |
4a277bde34da8915fd8213c0bb37099246d3b91e | 1,187 | py | Python | hwt/hdl/types/integerCast.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null | hwt/hdl/types/integerCast.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null | hwt/hdl/types/integerCast.py | mgielda/hwt | e6c699fb154f93ac03523bfe40a3d4fc1912d28b | [
"MIT"
] | null | null | null | from hwt.hdl.operator import Operator
from hwt.hdl.operatorDefs import AllOps
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.boolVal import HBoolVal
from hwt.hdl.types.defs import BOOL
from hwt.hdl.types.hdlType import default_auto_cast_fn
from hwt.hdl.value import Value
from hwt.doc_markers import internal
@internal
def cast_integer(self, sigOrVal, toType):
isVal = isinstance(sigOrVal, Value)
if toType == BOOL:
if isVal:
v = int(bool(sigOrVal.val))
return HBoolVal(v, BOOL,
sigOrVal.vldMask,
sigOrVal.updateTime)
elif isinstance(toType, Bits):
if isVal:
_v = sigOrVal.val
w = toType.bit_length()
assert _v.bit_length() <= w,\
"%d can not fit into %d bits" % (_v, w)
v = toType.fromPy(_v)
v.updateTime = sigOrVal.updateTime
v._dtype = toType
if not sigOrVal.vldMask:
v.vldMask = 0
return v
else:
return Operator.withRes(AllOps.IntToBits, [sigOrVal], toType)
return default_auto_cast_fn(self, sigOrVal, toType)
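# Descriptive note (added, not in the original source): integer *values* are
# converted eagerly (BOOL -> HBoolVal, Bits -> toType.fromPy after a width
# assert), an integer *signal* cast to a Bits type becomes an AllOps.IntToBits
# operator node, and every remaining case falls back to default_auto_cast_fn.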
| 31.236842 | 73 | 0.604886 |
4a277be7714bdaa880c567474e8f5b5f484959de | 6,780 | py | Python | tools/train.py | LRY89757/mmdetection | 8cd89a678fbde6c37eda3fa32af58b7b398abe41 | [
"Apache-2.0"
] | 2 | 2021-11-29T09:21:35.000Z | 2021-12-19T10:14:32.000Z | tools/train.py | LRY89757/mmdetection | 8cd89a678fbde6c37eda3fa32af58b7b398abe41 | [
"Apache-2.0"
] | null | null | null | tools/train.py | LRY89757/mmdetection | 8cd89a678fbde6c37eda3fa32af58b7b398abe41 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
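# Example invocation (illustrative; the config path and work dir below are
# assumptions about the repository layout, not taken from this file):
# python tools/train.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
# --work-dir work_dirs/faster_rcnn_r50 --gpu-ids 0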
def main():
args = parse_args()  # parse the command-line arguments
cfg = Config.fromfile(args.config)  # load the training config file
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
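
    # start training; validation is enabled unless --no-validate was passed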
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)


if __name__ == '__main__':
main()
| 36.451613 | 79 | 0.652065 |
4a277c93788a707efe5c9ec2856ce8c6f9fc3dc0 | 856 | py | Python | development/migrations/0059_auto_20170619_1150.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | development/migrations/0059_auto_20170619_1150.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | development/migrations/0059_auto_20170619_1150.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
('development', '0058_delete_projectgroup'),
]
operations = [
migrations.AlterModelOptions(
name='developmentasset',
options={'verbose_name': 'Develoment File'},
),
migrations.AlterModelOptions(
name='developmentprojectasset',
options={'verbose_name': 'Development Project File'},
),
migrations.AlterField(
model_name='developmentproject',
name='consultation_stage',
field=models.ForeignKey(blank=True, null=True, to='development.ConsultationStage', on_delete=django.db.models.deletion.PROTECT),
),
]
| 29.517241 | 140 | 0.641355 |
4a277c9a1db0d35aa294bbd4d51166b6a27a94f9 | 2,222 | py | Python | tests/test_wsgi.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_wsgi.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_wsgi.py | ambrozic/http3 | 5442006a41f94a3e41186910d7a6e8546adf0f89 | [
"BSD-3-Clause"
] | null | null | null | import sys
import pytest
import http3
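

# Minimal WSGI applications used as fixtures for http3.Client's WSGI support.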
def hello_world(environ, start_response):
status = "200 OK"
output = b"Hello, World!"
response_headers = [
("Content-type", "text/plain"),
("Content-Length", str(len(output))),
]
start_response(status, response_headers)
return [output]


def echo_body(environ, start_response):
status = "200 OK"
output = environ["wsgi.input"].read()
response_headers = [
("Content-type", "text/plain"),
("Content-Length", str(len(output))),
]
start_response(status, response_headers)
return [output]


def echo_body_with_response_stream(environ, start_response):
status = "200 OK"
response_headers = [("Content-Type", "text/plain")]
start_response(status, response_headers)
def output_generator(f):
while True:
output = f.read(2)
if not output:
break
yield output
return output_generator(f=environ["wsgi.input"])
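

# WSGI app that answers with a 500 and forwards exc_info from a caught ValueError to start_response.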
def raise_exc(environ, start_response):
status = "500 Server Error"
output = b"Nope!"
response_headers = [
("Content-type", "text/plain"),
("Content-Length", str(len(output))),
]
try:
raise ValueError()
    except ValueError:
exc_info = sys.exc_info()
start_response(status, response_headers, exc_info=exc_info)
return [output]


def test_wsgi():
client = http3.Client(app=hello_world)
response = client.get("http://www.example.org/")
assert response.status_code == 200
assert response.text == "Hello, World!"


def test_wsgi_upload():
client = http3.Client(app=echo_body)
response = client.post("http://www.example.org/", data=b"example")
assert response.status_code == 200
assert response.text == "example"


def test_wsgi_upload_with_response_stream():
client = http3.Client(app=echo_body_with_response_stream)
response = client.post("http://www.example.org/", data=b"example")
assert response.status_code == 200
assert response.text == "example"


def test_wsgi_exc():
client = http3.Client(app=raise_exc)
with pytest.raises(ValueError):
response = client.get("http://www.example.org/")
| 23.145833 | 70 | 0.645815 |
4a277d54479faf78b386d39381052129329ec959 | 1,446 | py | Python | fields/validators.py | MilkBotttle/BFP | 0753d8476b50a5b3342dc49b127712b545e417fd | [
"MIT"
] | null | null | null | fields/validators.py | MilkBotttle/BFP | 0753d8476b50a5b3342dc49b127712b545e417fd | [
"MIT"
] | null | null | null | fields/validators.py | MilkBotttle/BFP | 0753d8476b50a5b3342dc49b127712b545e417fd | [
"MIT"
] | null | null | null | import ipaddress
from django.utils.translation import gettext_lazy as _, ngettext_lazy
from django.core.exceptions import ValidationError


def validate_ipv4_network(value):
try:
ipaddress.IPv4Network(value)
except ValueError:
raise ValidationError(_('Enter a valid IPv4 network.'), code='invalid')


def validate_ipv6_network(value):
try:
ipaddress.IPv6Network(value)
except ValueError:
raise ValidationError(_('Enter a valid IPv6 network.'), code='invalid')


def validate_ipv46_network(value):
try:
validate_ipv4_network(value)
except ValidationError:
try:
validate_ipv6_network(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 network.'), code='invalid')
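

# maps a protocol name to its ([validators], generic error message) pair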
ip_network_validator_map = {
'both': ([validate_ipv46_network], _('Enter a valid IPv4 or IPv6 network.')),
'ipv4': ([validate_ipv4_network], _('Enter a valid IPv4 network.')),
'ipv6': ([validate_ipv6_network], _('Enter a valid IPv6 network.')),
}


def ip_network_validators(protocol):
"""
Depending on the given parameters, return the appropriate validators for
the GenericIPNetworkField.
"""
try:
return ip_network_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_network_validator_map)))
| 34.428571 | 91 | 0.690871 |
4a277e55a1170b50990d8a4af93e157163793224 | 6,599 | py | Python | kali_extractor/kali_extractor/documents_processor.py | adipasquale/kali_dumps_scripts | 3a340627169ba42c461fa914f68d7cc1f9458075 | [
"Apache-2.0"
] | 3 | 2019-09-12T06:44:17.000Z | 2019-10-09T12:14:09.000Z | kali_extractor/kali_extractor/documents_processor.py | adipasquale/kali_dumps_scripts | 3a340627169ba42c461fa914f68d7cc1f9458075 | [
"Apache-2.0"
] | 7 | 2019-01-16T16:25:32.000Z | 2019-01-29T08:41:56.000Z | kali_extractor/kali_extractor/documents_processor.py | SocialGouv/kaligator | 3a340627169ba42c461fa914f68d7cc1f9458075 | [
"Apache-2.0"
] | 2 | 2019-01-22T06:04:29.000Z | 2019-01-23T07:49:30.000Z | from xml.etree import ElementTree
import html
from kali_extractor.custom_xml_parser import \
custom_xml_parser, custom_abdera_parser
from kali_extractor.dict_utils import deep_get, deep_set
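

# Processors that turn individual KALI XML export files into JSON-ready dicts.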
class DocumentProcessor(object):
def __init__(
self, xml_path, html_fields=None, array_fields=None, **kwargs
):
self.xml_path = xml_path
self.html_fields = [] if html_fields is None else html_fields
self.array_fields = [] if array_fields is None else array_fields

    def parse_xml(self):
"""
reads and parses the XML into a json-formatted dictionary object.
drops the root tag (<ARTICLE>, <IDCC> ...)
"""
with open(self.xml_path, "r") as f:
element = ElementTree.parse(f)
self.root = element.getroot()
parsed_root = custom_xml_parser.data(self.root)
root_keys = list(parsed_root.keys())
if len(root_keys) > 1:
raise Exception(
"parsed XML has more than one element at the root level: %s" %
",".join(root_keys)
)
self.json = parsed_root[root_keys[0]]

    def format_array_fields(self):
"""
Enforce some fields to always be arrays, even with a single entry.
By default, you get a mixed schema, with single items as objects,
and multiple items as arrays
"""
for field in self.array_fields:
for value, selector in deep_get(self.json, field):
if value is not None and not isinstance(value, list):
deep_set(self.json, selector, [value])

    def format_html_fields(self):
"""
Enforce some fields to contain their content as unformatted HTML.
By default, all the fields are parsed and split into objects but
we want to treat HTML content as raw text.
"""
for field in self.html_fields:
xpath_selector = "./%s" % "/".join(field.split("."))
value = self.root.find(xpath_selector)
if value is None:
continue
sub_strings = [
ElementTree.tostring(sub_element).decode("utf-8")
for sub_element in value
]
parsed_contenu_html = html.unescape("".join(sub_strings))
deep_set(self.json, field, parsed_contenu_html)

    def process(self):
self.parse_xml()
self.format_html_fields()
self.format_array_fields()
return self.json


class ArticleProcessor(DocumentProcessor):
def __init__(self, xml_path, **kwargs):
super(ArticleProcessor, self).__init__(
xml_path,
html_fields=["BLOC_TEXTUEL/CONTENU", "NOTA/CONTENU"],
array_fields=["VERSIONS/VERSION", "LIENS/LIEN"],
**kwargs
)


class IDCCProcessor(DocumentProcessor):
def __init__(self, xml_path, **kwargs):
super(IDCCProcessor, self).__init__(
xml_path,
html_fields=[],
array_fields=[
"STRUCTURE_TXT/TM", "ACTS_PRO/ACT_PRO",
"NUMS_BROCH/NUM_BROCH", "STRUCTURE_TXT/TM/LIEN_TXT"
],
**kwargs
)


class SectionTaProcessor(DocumentProcessor):
def __init__(self, xml_path, **kwargs):
super(SectionTaProcessor, self).__init__(
xml_path,
html_fields=[],
array_fields=[
"STRUCTURE_TA/LIEN_ART", "STRUCTURE_TA/LIEN_SECTION_TA",
"CONTEXTE/TEXTE/TITRE_TXT", "CONTEXTE/TEXTE/TM/TITRE_TM"
],
**kwargs
)


def flatten_abdera_item(item):
"""
takes a dict parsed by the abdera algorithm and returns one
that is formatted like the rest of the XML parsed docs
(with top level attributes and a `_text` key)
"""
keys = list(item.keys())
if len(keys) != 1:
raise Exception("found %s abdera tag names instead of 1" % len(keys))
key = keys[0]
abdera_obj = item[key]
new_object = {"_type": key}
for name, value in abdera_obj.get("attributes").items():
new_object[name] = value
if "children" in abdera_obj:
if not isinstance(abdera_obj["children"], list):
raise Exception(
"children should be a list but was a %s" %
abdera_obj["children"].__class__
)
if len(abdera_obj["children"]) != 1:
raise Exception(
"children should contain a single item but has %s" %
len(abdera_obj["children"])
)
new_object["_text"] = abdera_obj["children"][0]
return new_object


class TexteVersionProcessor(DocumentProcessor):
def __init__(self, xml_path, **kwargs):
super(TexteVersionProcessor, self).__init__(
xml_path,
html_fields=[
"VISAS/CONTENU",
"SIGNATAIRES/SIGNPATRON/CONTENU",
"SIGNATAIRES/SIGNSYNDIC/CONTENU",
"SIGNATAIRES/ORGADHERE/CONTENU",
"SIGNATAIRES/ORGDENONCE/CONTENU",
"SIGNATAIRES/SIGNEXT/CONTENU",
"SIGNATAIRES/EXECUTION/CONTENU",
"NOTA/CONTENU",
],
**kwargs
)


class TexteStructProcessor(DocumentProcessor):
def __init__(self, xml_path, **kwargs):
super(TexteStructProcessor, self).__init__(
xml_path,
array_fields=["VERSIONS/VERSION"],
**kwargs
)

    def parse_xml(self):
"""
slightly hacky, this fixes #6, as the STRUCT contains a mixed
list of two different tags, we cannot treat it as the array fields
"""
super(TexteStructProcessor, self).parse_xml()
if 'STRUCT' not in self.json:
return
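        # re-parse using the abdera convention so STRUCT's mixed child tags keep their document order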
doc = custom_abdera_parser.data(self.root)
subdocs = [
d for d in doc["TEXTEKALI"]["children"]
if ["STRUCT"] == list(d.keys())
]
if len(subdocs) != 1:
raise Exception(
"found %s STRUCT tags in TEXTEKALI instead of 1" % len(subdocs)
)
if subdocs[0]["STRUCT"].get("children"):
children = subdocs[0]["STRUCT"]["children"]
flat_children = [flatten_abdera_item(c) for c in children]
elif len(subdocs[0]["STRUCT"].keys()) == 1:
flat_children = [flatten_abdera_item(subdocs[0]["STRUCT"])]
        else:
            # also covers an unexpected STRUCT shape, so flat_children is always bound
            flat_children = []
self.json["STRUCT"] = flat_children
| 35.67027 | 79 | 0.578421 |